drivers/crypto: invoke probing finish function
drivers/crypto/dpaa_sec/dpaa_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2021 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_ip.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

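/* Driver ID handed to the cryptodev framework; it is assumed to be filled
 * in when the PMD registers itself later in this file.
 */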
uint8_t dpaa_cryptodev_driver_id;

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

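/* Allocate a per-operation context from the queue pair's context pool.
 * The context embeds the SG table used to build the compound frame; only
 * the first sg_count entries are cleared, since that is all the caller
 * will use.
 */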
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (retval || !ctx) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear the SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes (four entries), hence it is
	 * invoked in steps of four to clear all the SG entries. Since
	 * dpaa_sec_alloc_ctx() is called for each packet, memset() would be
	 * costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as the destination channel,
 * so that all packets enqueued on this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Frames are enqueued on in_fq; CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx:
	 * sg[0] is for output,
	 * sg[1] is for input.
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* The CAAM result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
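/* prepare the PDCP protocol command block of the session */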
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
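	/* RTA emits descriptor words in the SEC engine's big-endian format;
	 * the swap flag below therefore requests byte-swapping on
	 * little-endian CPUs (an assumption about the RTA swap semantics).
	 */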
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (ses->pdcp.sdap_enabled) {
		int nb_keys_to_inline =
				rta_inline_pdcp_sdap_query(authdata.algtype,
					cipherdata.algtype,
					ses->pdcp.sn_size,
					ses->pdcp.hfn_ovd);
		if (nb_keys_to_inline >= 1) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		if (nb_keys_to_inline >= 2) {
			authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;
		}
	} else {
		if (rta_inline_pdcp_query(authdata.algtype,
					cipherdata.algtype,
					ses->pdcp.sn_size,
					ses->pdcp.hfn_ovd)) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
						     1, swap, &authdata);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		}
	}
	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

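	/* Stash the key lengths in sh_desc[0..1]; rta_inline_query() then
	 * reports in sh_desc[2] which keys fit inline in the shared
	 * descriptor (bit 0: cipher key, bit 1: auth key). Keys that do not
	 * fit are referenced by physical address instead.
	 */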
	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5:
		case RTE_CRYPTO_AUTH_SHA1:
		case RTE_CRYPTO_AUTH_SHA224:
		case RTE_CRYPTO_AUTH_SHA256:
		case RTE_CRYPTO_AUTH_SHA384:
		case RTE_CRYPTO_AUTH_SHA512:
			shared_desc_len = cnstr_shdsc_hash(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			shared_desc_len = cnstr_shdsc_aes_mac(
						cdb->sh_desc,
						true, swap, SHR_NEVER,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AEAD:
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* auth_only_len is set to 0 here; it is overwritten in the
		 * FD for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* The qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, set the QM_VDQCR_EXACT
	 * flag so that exactly the requested number is returned. Without
	 * the flag the dequeue may return up to two more buffers than
	 * requested, so in that case request two fewer.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx:
		 * sg[0] is for output,
		 * sg[1] is for input.
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
						op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* the op status has been reported; now free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

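/* Build an auth-only job over a scatter-gather mbuf chain: cf->sg[0]
 * receives the digest, while cf->sg[1] is an extension entry pointing to
 * the input chain (optional IV for SNOW3G/ZUC, the packet data and, on
 * decode, the expected digest so that hardware can verify it).
 */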
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

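	/* For SNOW3G UIA2 and ZUC EIA3 the auth length and offset are given
	 * in bits; they must be byte aligned and are converted to bytes.
	 */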
	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining input segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* save the expected digest so hardware can verify it */
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

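/* Build a cipher-only job over scatter-gather mbufs: cf->sg[0] points to
 * the output chain and cf->sg[1] to the input chain (IV followed by the
 * packet data).
 */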
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

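/* Build a cipher-only job for contiguous (single-segment) buffers */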
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

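/* Build an AEAD (GCM) job over scatter-gather mbufs: the output side
 * carries the ciphertext (plus the digest on encode); the input side
 * carries the IV, optional AAD and packet data, plus the expected digest
 * on decode.
 */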
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

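/* Build an AEAD (GCM) job for contiguous (single-segment) buffers */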
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

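/* Build a chained cipher+auth job over scatter-gather mbufs; the layout
 * mirrors the AEAD case, with the IV and packet data on the input side
 * and the digest appended to the output on encode.
 */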
1387 static inline struct dpaa_sec_job *
1388 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1389 {
1390         struct rte_crypto_sym_op *sym = op->sym;
1391         struct dpaa_sec_job *cf;
1392         struct dpaa_sec_op_ctx *ctx;
1393         struct qm_sg_entry *sg, *out_sg, *in_sg;
1394         struct rte_mbuf *mbuf;
1395         uint8_t req_segs;
1396         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1397                         ses->iv.offset);
1398
1399         if (sym->m_dst) {
1400                 mbuf = sym->m_dst;
1401                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1402         } else {
1403                 mbuf = sym->m_src;
1404                 req_segs = mbuf->nb_segs * 2 + 4;
1405         }
1406
1407         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1408                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1409                                 MAX_SG_ENTRIES);
1410                 return NULL;
1411         }
1412
1413         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1414         if (!ctx)
1415                 return NULL;
1416
1417         cf = &ctx->job;
1418         ctx->op = op;
1419
1420         rte_prefetch0(cf->sg);
1421
1422         /* output */
1423         out_sg = &cf->sg[0];
1424         out_sg->extension = 1;
1425         if (is_encode(ses))
1426                 out_sg->length = sym->auth.data.length + ses->digest_length;
1427         else
1428                 out_sg->length = sym->auth.data.length;
1429
1430         /* output sg entries */
1431         sg = &cf->sg[2];
1432         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1433         cpu_to_hw_sg(out_sg);
1434
1435         /* 1st seg */
1436         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1437         sg->length = mbuf->data_len - sym->auth.data.offset;
1438         sg->offset = sym->auth.data.offset;
1439
1440         /* Successive segs */
1441         mbuf = mbuf->next;
1442         while (mbuf) {
1443                 cpu_to_hw_sg(sg);
1444                 sg++;
1445                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1446                 sg->length = mbuf->data_len;
1447                 mbuf = mbuf->next;
1448         }
1449         sg->length -= ses->digest_length;
1450
1451         if (is_encode(ses)) {
1452                 cpu_to_hw_sg(sg);
1453                 /* set auth output */
1454                 sg++;
1455                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1456                 sg->length = ses->digest_length;
1457         }
1458         sg->final = 1;
1459         cpu_to_hw_sg(sg);
1460
1461         /* input */
1462         mbuf = sym->m_src;
1463         in_sg = &cf->sg[1];
1464         in_sg->extension = 1;
1465         in_sg->final = 1;
1466         if (is_encode(ses))
1467                 in_sg->length = ses->iv.length + sym->auth.data.length;
1468         else
1469                 in_sg->length = ses->iv.length + sym->auth.data.length
1470                                                 + ses->digest_length;
1471
1472         /* input sg entries */
1473         sg++;
1474         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1475         cpu_to_hw_sg(in_sg);
1476
1477         /* 1st seg IV */
1478         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1479         sg->length = ses->iv.length;
1480         cpu_to_hw_sg(sg);
1481
1482         /* 2nd seg */
1483         sg++;
1484         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1485         sg->length = mbuf->data_len - sym->auth.data.offset;
1486         sg->offset = sym->auth.data.offset;
1487
1488         /* Successive segs */
1489         mbuf = mbuf->next;
1490         while (mbuf) {
1491                 cpu_to_hw_sg(sg);
1492                 sg++;
1493                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1494                 sg->length = mbuf->data_len;
1495                 mbuf = mbuf->next;
1496         }
1497
1498         sg->length -= ses->digest_length;
1499         if (is_decode(ses)) {
1500                 cpu_to_hw_sg(sg);
1501                 sg++;
1502                 memcpy(ctx->digest, sym->auth.digest.data,
1503                         ses->digest_length);
1504                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1505                 sg->length = ses->digest_length;
1506         }
1507         sg->final = 1;
1508         cpu_to_hw_sg(sg);
1509
1510         return cf;
1511 }
1512
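/* Contiguous-mbuf variant of the chained cipher+auth job builder. At
 * most 7 SG entries are needed (the two compound-frame entries plus IV,
 * data and digest entries), so the context is allocated with a fixed
 * count.
 */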
1513 static inline struct dpaa_sec_job *
1514 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1515 {
1516         struct rte_crypto_sym_op *sym = op->sym;
1517         struct dpaa_sec_job *cf;
1518         struct dpaa_sec_op_ctx *ctx;
1519         struct qm_sg_entry *sg;
1520         rte_iova_t src_start_addr, dst_start_addr;
1521         uint32_t length = 0;
1522         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1523                         ses->iv.offset);
1524
1525         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1526         if (sym->m_dst)
1527                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1528         else
1529                 dst_start_addr = src_start_addr;
1530
1531         ctx = dpaa_sec_alloc_ctx(ses, 7);
1532         if (!ctx)
1533                 return NULL;
1534
1535         cf = &ctx->job;
1536         ctx->op = op;
1537
1538         /* input */
1539         rte_prefetch0(cf->sg);
1540         sg = &cf->sg[2];
1541         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1542         if (is_encode(ses)) {
1543                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1544                 sg->length = ses->iv.length;
1545                 length += sg->length;
1546                 cpu_to_hw_sg(sg);
1547
1548                 sg++;
1549                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1550                 sg->length = sym->auth.data.length;
1551                 length += sg->length;
1552                 sg->final = 1;
1553                 cpu_to_hw_sg(sg);
1554         } else {
1555                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1556                 sg->length = ses->iv.length;
1557                 length += sg->length;
1558                 cpu_to_hw_sg(sg);
1559
1560                 sg++;
1561
1562                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1563                 sg->length = sym->auth.data.length;
1564                 length += sg->length;
1565                 cpu_to_hw_sg(sg);
1566
1567                 memcpy(ctx->digest, sym->auth.digest.data,
1568                        ses->digest_length);
1569                 sg++;
1570
1571                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1572                 sg->length = ses->digest_length;
1573                 length += sg->length;
1574                 sg->final = 1;
1575                 cpu_to_hw_sg(sg);
1576         }
1577         /* input compound frame */
1578         cf->sg[1].length = length;
1579         cf->sg[1].extension = 1;
1580         cf->sg[1].final = 1;
1581         cpu_to_hw_sg(&cf->sg[1]);
1582
1583         /* output */
1584         sg++;
1585         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1586         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1587         sg->length = sym->cipher.data.length;
1588         length = sg->length;
1589         if (is_encode(ses)) {
1590                 cpu_to_hw_sg(sg);
1591                 /* set auth output */
1592                 sg++;
1593                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1594                 sg->length = ses->digest_length;
1595                 length += sg->length;
1596         }
1597         sg->final = 1;
1598         cpu_to_hw_sg(sg);
1599
1600         /* output compound frame */
1601         cf->sg[0].length = length;
1602         cf->sg[0].extension = 1;
1603         cpu_to_hw_sg(&cf->sg[0]);
1604
1605         return cf;
1606 }
1607
1608 #ifdef RTE_LIB_SECURITY
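/* Build a SEC job for protocol offload (IPsec/PDCP) on a contiguous
 * mbuf. The input entry covers the whole packet; the output entry
 * exposes the full buffer room, since protocol processing may grow or
 * shrink the frame. The parsed L4 packet type is cleared because it is
 * stale after encap/decap.
 */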
1609 static inline struct dpaa_sec_job *
1610 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1611 {
1612         struct rte_crypto_sym_op *sym = op->sym;
1613         struct dpaa_sec_job *cf;
1614         struct dpaa_sec_op_ctx *ctx;
1615         struct qm_sg_entry *sg;
1616         phys_addr_t src_start_addr, dst_start_addr;
1617
1618         ctx = dpaa_sec_alloc_ctx(ses, 2);
1619         if (!ctx)
1620                 return NULL;
1621         cf = &ctx->job;
1622         ctx->op = op;
1623
1624         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1625
1626         if (sym->m_dst)
1627                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1628         else
1629                 dst_start_addr = src_start_addr;
1630
1631         /* input */
1632         sg = &cf->sg[1];
1633         qm_sg_entry_set64(sg, src_start_addr);
1634         sg->length = sym->m_src->pkt_len;
1635         sg->final = 1;
1636         cpu_to_hw_sg(sg);
1637
1638         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1639         /* output */
1640         sg = &cf->sg[0];
1641         qm_sg_entry_set64(sg, dst_start_addr);
1642         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1643         cpu_to_hw_sg(sg);
1644
1645         return cf;
1646 }
1647
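/* Scatter-gather variant of build_proto(): the input and output
 * compound-frame entries are extension entries walking the segment
 * chains of m_src and m_dst (in-place on m_src when no m_dst is given).
 */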
1648 static inline struct dpaa_sec_job *
1649 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1650 {
1651         struct rte_crypto_sym_op *sym = op->sym;
1652         struct dpaa_sec_job *cf;
1653         struct dpaa_sec_op_ctx *ctx;
1654         struct qm_sg_entry *sg, *out_sg, *in_sg;
1655         struct rte_mbuf *mbuf;
1656         uint8_t req_segs;
1657         uint32_t in_len = 0, out_len = 0;
1658
1659         if (sym->m_dst)
1660                 mbuf = sym->m_dst;
1661         else
1662                 mbuf = sym->m_src;
1663
1664         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1665         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1666                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1667                                 MAX_SG_ENTRIES);
1668                 return NULL;
1669         }
1670
1671         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1672         if (!ctx)
1673                 return NULL;
1674         cf = &ctx->job;
1675         ctx->op = op;
1676         /* output */
1677         out_sg = &cf->sg[0];
1678         out_sg->extension = 1;
1679         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1680
1681         /* 1st seg */
1682         sg = &cf->sg[2];
1683         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1684         sg->offset = 0;
1685
1686         /* Successive segs */
1687         while (mbuf->next) {
1688                 sg->length = mbuf->data_len;
1689                 out_len += sg->length;
1690                 mbuf = mbuf->next;
1691                 cpu_to_hw_sg(sg);
1692                 sg++;
1693                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1694                 sg->offset = 0;
1695         }
1696         sg->length = mbuf->buf_len - mbuf->data_off;
1697         out_len += sg->length;
1698         sg->final = 1;
1699         cpu_to_hw_sg(sg);
1700
1701         out_sg->length = out_len;
1702         cpu_to_hw_sg(out_sg);
1703
1704         /* input */
1705         mbuf = sym->m_src;
1706         in_sg = &cf->sg[1];
1707         in_sg->extension = 1;
1708         in_sg->final = 1;
1709         in_len = mbuf->data_len;
1710
1711         sg++;
1712         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1713
1714         /* 1st seg */
1715         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1716         sg->length = mbuf->data_len;
1717         sg->offset = 0;
1718
1719         /* Successive segs */
1720         mbuf = mbuf->next;
1721         while (mbuf) {
1722                 cpu_to_hw_sg(sg);
1723                 sg++;
1724                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1725                 sg->length = mbuf->data_len;
1726                 sg->offset = 0;
1727                 in_len += sg->length;
1728                 mbuf = mbuf->next;
1729         }
1730         sg->final = 1;
1731         cpu_to_hw_sg(sg);
1732
1733         in_sg->length = in_len;
1734         cpu_to_hw_sg(in_sg);
1735
1736         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1737
1738         return cf;
1739 }
1740 #endif
1741
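/* Datapath enqueue. For each op: resolve the session, lazily attach it
 * to this queue pair if needed, pick the contiguous or scatter-gather
 * job builder based on the session context, and wrap the resulting job
 * in a compound frame descriptor. fd->cmd seeds the DPOVRD register
 * when auth-only header/tail lengths or a per-packet PDCP HFN override
 * must reach the shared descriptor. Frames are pushed to the session's
 * SEC input FQ in bursts of up to DPAA_SEC_BURST.
 *
 * Applications reach this through the public API, e.g. (sketch):
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
 *
 * where ops[n..nb-1] were not enqueued and may be retried.
 */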
1742 static uint16_t
1743 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1744                        uint16_t nb_ops)
1745 {
1746         /* Transmit the frames to the given device and queue pair */
1747         uint32_t loop;
1748         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1749         uint16_t num_tx = 0, nb_ops_in = nb_ops;
1750         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1751         uint32_t frames_to_send;
1752         struct rte_crypto_op *op;
1753         struct dpaa_sec_job *cf;
1754         dpaa_sec_session *ses;
1755         uint16_t auth_hdr_len, auth_tail_len;
1756         uint32_t index, flags[DPAA_SEC_BURST] = {0};
1757         struct qman_fq *inq[DPAA_SEC_BURST];
1758
1759         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1760                 if (rte_dpaa_portal_init((void *)0)) {
1761                         DPAA_SEC_ERR("Failure in affining portal");
1762                         return 0;
1763                 }
1764         }
1765
1766         while (nb_ops) {
1767                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1768                                 DPAA_SEC_BURST : nb_ops;
1769                 for (loop = 0; loop < frames_to_send; loop++) {
1770                         op = *(ops++);
1771                         if (*dpaa_seqn(op->sym->m_src) != 0) {
1772                                 index = *dpaa_seqn(op->sym->m_src) - 1;
1773                                 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1774                                         /* QM_EQCR_DCA_IDXMASK = 0x0f */
1775                                         flags[loop] = ((index & 0x0f) << 8);
1776                                         flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1777                                         DPAA_PER_LCORE_DQRR_SIZE--;
1778                                         DPAA_PER_LCORE_DQRR_HELD &=
1779                                                                 ~(1 << index);
1780                                 }
1781                         }
1782
1783                         switch (op->sess_type) {
1784                         case RTE_CRYPTO_OP_WITH_SESSION:
1785                                 ses = (dpaa_sec_session *)
1786                                         get_sym_session_private_data(
1787                                                 op->sym->session,
1788                                                 dpaa_cryptodev_driver_id);
1789                                 break;
1790 #ifdef RTE_LIB_SECURITY
1791                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1792                                 ses = (dpaa_sec_session *)
1793                                         get_sec_session_private_data(
1794                                                         op->sym->sec_session);
1795                                 break;
1796 #endif
1797                         default:
1798                                 DPAA_SEC_DP_ERR(
1799                                         "sessionless crypto op not supported");
1800                                 frames_to_send = loop;
1801                                 nb_ops = loop;
1802                                 goto send_pkts;
1803                         }
1804
1805                         if (!ses) {
1806                                 DPAA_SEC_DP_ERR("session not available");
1807                                 frames_to_send = loop;
1808                                 nb_ops = loop;
1809                                 goto send_pkts;
1810                         }
1811
1812                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1813                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1814                                         frames_to_send = loop;
1815                                         nb_ops = loop;
1816                                         goto send_pkts;
1817                                 }
1818                         } else if (unlikely(ses->qp[rte_lcore_id() %
1819                                                 MAX_DPAA_CORES] != qp)) {
1820                         DPAA_SEC_DP_ERR(
1821                                 "Old sess->qp = %p, new qp = %p\n",
1822                                         ses->qp[rte_lcore_id() %
1823                                         MAX_DPAA_CORES], qp);
1824                                 frames_to_send = loop;
1825                                 nb_ops = loop;
1826                                 goto send_pkts;
1827                         }
1828
1829                         auth_hdr_len = op->sym->auth.data.length -
1830                                                 op->sym->cipher.data.length;
1831                         auth_tail_len = 0;
1832
1833                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1834                                   ((op->sym->m_dst == NULL) ||
1835                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1836                                 switch (ses->ctxt) {
1837 #ifdef RTE_LIB_SECURITY
1838                                 case DPAA_SEC_PDCP:
1839                                 case DPAA_SEC_IPSEC:
1840                                         cf = build_proto(op, ses);
1841                                         break;
1842 #endif
1843                                 case DPAA_SEC_AUTH:
1844                                         cf = build_auth_only(op, ses);
1845                                         break;
1846                                 case DPAA_SEC_CIPHER:
1847                                         cf = build_cipher_only(op, ses);
1848                                         break;
1849                                 case DPAA_SEC_AEAD:
1850                                         cf = build_cipher_auth_gcm(op, ses);
1851                                         auth_hdr_len = ses->auth_only_len;
1852                                         break;
1853                                 case DPAA_SEC_CIPHER_HASH:
1854                                         auth_hdr_len =
1855                                                 op->sym->cipher.data.offset
1856                                                 - op->sym->auth.data.offset;
1857                                         auth_tail_len =
1858                                                 op->sym->auth.data.length
1859                                                 - op->sym->cipher.data.length
1860                                                 - auth_hdr_len;
1861                                         cf = build_cipher_auth(op, ses);
1862                                         break;
1863                                 default:
1864                                         DPAA_SEC_DP_ERR("Unsupported operation type");
1865                                         frames_to_send = loop;
1866                                         nb_ops = loop;
1867                                         goto send_pkts;
1868                                 }
1869                         } else {
1870                                 switch (ses->ctxt) {
1871 #ifdef RTE_LIB_SECURITY
1872                                 case DPAA_SEC_PDCP:
1873                                 case DPAA_SEC_IPSEC:
1874                                         cf = build_proto_sg(op, ses);
1875                                         break;
1876 #endif
1877                                 case DPAA_SEC_AUTH:
1878                                         cf = build_auth_only_sg(op, ses);
1879                                         break;
1880                                 case DPAA_SEC_CIPHER:
1881                                         cf = build_cipher_only_sg(op, ses);
1882                                         break;
1883                                 case DPAA_SEC_AEAD:
1884                                         cf = build_cipher_auth_gcm_sg(op, ses);
1885                                         auth_hdr_len = ses->auth_only_len;
1886                                         break;
1887                                 case DPAA_SEC_CIPHER_HASH:
1888                                         auth_hdr_len =
1889                                                 op->sym->cipher.data.offset
1890                                                 - op->sym->auth.data.offset;
1891                                         auth_tail_len =
1892                                                 op->sym->auth.data.length
1893                                                 - op->sym->cipher.data.length
1894                                                 - auth_hdr_len;
1895                                         cf = build_cipher_auth_sg(op, ses);
1896                                         break;
1897                                 default:
1898                                         DPAA_SEC_DP_ERR("Unsupported operation type");
1899                                         frames_to_send = loop;
1900                                         nb_ops = loop;
1901                                         goto send_pkts;
1902                                 }
1903                         }
1904                         if (unlikely(!cf)) {
1905                                 frames_to_send = loop;
1906                                 nb_ops = loop;
1907                                 goto send_pkts;
1908                         }
1909
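                        /* Wrap the job in a compound frame descriptor:
                         * cf->sg[0] is the output entry, cf->sg[1] the
                         * input entry.
                         */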
1910                         fd = &fds[loop];
1911                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1912                         fd->opaque_addr = 0;
1913                         fd->cmd = 0;
1914                         qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1915                         fd->_format1 = qm_fd_compound;
1916                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1917
1918                         /* auth_only_len is set to 0 in the descriptor and
1919                          * is overridden here via fd->cmd, which updates
1920                          * the DPOVRD register.
1921                          */
1922                         if (auth_hdr_len || auth_tail_len) {
1923                                 fd->cmd = 0x80000000;
1924                                 fd->cmd |=
1925                                         ((auth_tail_len << 16) | auth_hdr_len);
1926                         }
1927
1928 #ifdef RTE_LIB_SECURITY
1929                         /* For PDCP, the per-packet HFN is stored in the
1930                          * mbuf private area, after the sym_op.
1931                          */
1932                         if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1933                                 fd->cmd = 0x80000000 |
1934                                         *((uint32_t *)((uint8_t *)op +
1935                                         ses->pdcp.hfn_ovd_offset));
1936                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1937                                         *((uint32_t *)((uint8_t *)op +
1938                                         ses->pdcp.hfn_ovd_offset)),
1939                                         ses->pdcp.hfn_ovd);
1940                         }
1941 #endif
1942                 }
1943 send_pkts:
1944                 loop = 0;
1945                 while (loop < frames_to_send) {
1946                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1947                                         &flags[loop], frames_to_send - loop);
1948                 }
1949                 nb_ops -= frames_to_send;
1950                 num_tx += frames_to_send;
1951         }
1952
1953         dpaa_qp->tx_pkts += num_tx;
1954         dpaa_qp->tx_errs += nb_ops_in - num_tx;
1955
1956         return num_tx;
1957 }
1958
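/* Datapath dequeue: affine a QMAN portal to this lcore if needed, then
 * poll up to nb_ops completed crypto ops from the qp's output FQ via
 * dpaa_sec_deq(). Reached through rte_cryptodev_dequeue_burst(), e.g.
 * (sketch):
 *
 *	uint16_t n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb);
 */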
1959 static uint16_t
1960 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1961                        uint16_t nb_ops)
1962 {
1963         uint16_t num_rx;
1964         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1965
1966         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1967                 if (rte_dpaa_portal_init((void *)0)) {
1968                         DPAA_SEC_ERR("Failure in affining portal");
1969                         return 0;
1970                 }
1971         }
1972
1973         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1974
1975         dpaa_qp->rx_pkts += num_rx;
1976         dpaa_qp->rx_errs += nb_ops - num_rx;
1977
1978         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1979
1980         return num_rx;
1981 }
1982
1983 /** Release queue pair */
1984 static int
1985 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1986                             uint16_t qp_id)
1987 {
1988         struct dpaa_sec_dev_private *internals;
1989         struct dpaa_sec_qp *qp = NULL;
1990
1991         PMD_INIT_FUNC_TRACE();
1992
1993         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1994
1995         internals = dev->data->dev_private;
1996         if (qp_id >= internals->max_nb_queue_pairs) {
1997                 DPAA_SEC_ERR("Invalid qp_id %u, max supported qpid is %d",
1998                              qp_id, internals->max_nb_queue_pairs);
1999                 return -EINVAL;
2000         }
2001
2002         qp = &internals->qps[qp_id];
2003         rte_mempool_free(qp->ctx_pool);
2004         qp->internals = NULL;
2005         dev->data->queue_pairs[qp_id] = NULL;
2006
2007         return 0;
2008 }
2009
2010 /** Setup a queue pair */
2011 static int
2012 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2013                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2014                 __rte_unused int socket_id)
2015 {
2016         struct dpaa_sec_dev_private *internals;
2017         struct dpaa_sec_qp *qp = NULL;
2018         char str[20];
2019
2020         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2021
2022         internals = dev->data->dev_private;
2023         if (qp_id >= internals->max_nb_queue_pairs) {
2024                 DPAA_SEC_ERR("Invalid qp_id %u, max supported qpid is %d",
2025                              qp_id, internals->max_nb_queue_pairs);
2026                 return -EINVAL;
2027         }
2028
2029         qp = &internals->qps[qp_id];
2030         qp->internals = internals;
2031         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2032                         dev->data->dev_id, qp_id);
2033         if (!qp->ctx_pool) {
2034                 qp->ctx_pool = rte_mempool_create((const char *)str,
2035                                                         CTX_POOL_NUM_BUFS,
2036                                                         CTX_POOL_BUF_SIZE,
2037                                                         CTX_POOL_CACHE_SIZE, 0,
2038                                                         NULL, NULL, NULL, NULL,
2039                                                         SOCKET_ID_ANY, 0);
2040                 if (!qp->ctx_pool) {
2041                         DPAA_SEC_ERR("%s create failed", str);
2042                         return -ENOMEM;
2043                 }
2044         } else
2045                 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2046                                 dev->data->dev_id, qp_id);
2047         dev->data->queue_pairs[qp_id] = qp;
2048
2049         return 0;
2050 }
2051
2052 /** Returns the size of session structure */
2053 static unsigned int
2054 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2055 {
2056         PMD_INIT_FUNC_TRACE();
2057
2058         return sizeof(dpaa_sec_session);
2059 }
2060
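/* Translate a cipher-only xform into session state: copy the key and
 * map the rte_crypto algorithm onto the CAAM algorithm selector and
 * AAI mode used when the shared descriptor is built.
 */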
2061 static int
2062 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2063                      struct rte_crypto_sym_xform *xform,
2064                      dpaa_sec_session *session)
2065 {
2066         session->ctxt = DPAA_SEC_CIPHER;
2067         session->cipher_alg = xform->cipher.algo;
2068         session->iv.length = xform->cipher.iv.length;
2069         session->iv.offset = xform->cipher.iv.offset;
2070         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2071                                                RTE_CACHE_LINE_SIZE);
2072         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2073                 DPAA_SEC_ERR("No Memory for cipher key");
2074                 return -ENOMEM;
2075         }
2076         session->cipher_key.length = xform->cipher.key.length;
2077
2078         memcpy(session->cipher_key.data, xform->cipher.key.data,
2079                xform->cipher.key.length);
2080         switch (xform->cipher.algo) {
2081         case RTE_CRYPTO_CIPHER_AES_CBC:
2082                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2083                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2084                 break;
2085         case RTE_CRYPTO_CIPHER_DES_CBC:
2086                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2087                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2088                 break;
2089         case RTE_CRYPTO_CIPHER_3DES_CBC:
2090                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2091                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2092                 break;
2093         case RTE_CRYPTO_CIPHER_AES_CTR:
2094                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2095                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2096                 break;
2097         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2098                 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2099                 break;
2100         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2101                 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2102                 break;
2103         default:
2104                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2105                               xform->cipher.algo);
2106                 return -ENOTSUP;
2107         }
2108         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2109                         DIR_ENC : DIR_DEC;
2110
2111         return 0;
2112 }
2113
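/* Translate an auth-only xform into session state. For algorithms such
 * as SNOW 3G UIA2 and ZUC EIA3 the auth xform also carries the IV,
 * which is taken over here when no cipher is configured.
 */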
2114 static int
2115 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2116                    struct rte_crypto_sym_xform *xform,
2117                    dpaa_sec_session *session)
2118 {
2119         session->ctxt = DPAA_SEC_AUTH;
2120         session->auth_alg = xform->auth.algo;
2121         session->auth_key.length = xform->auth.key.length;
2122         if (xform->auth.key.length) {
2123                 session->auth_key.data =
2124                                 rte_zmalloc(NULL, xform->auth.key.length,
2125                                              RTE_CACHE_LINE_SIZE);
2126                 if (session->auth_key.data == NULL) {
2127                         DPAA_SEC_ERR("No Memory for auth key");
2128                         return -ENOMEM;
2129                 }
2130                 memcpy(session->auth_key.data, xform->auth.key.data,
2131                                 xform->auth.key.length);
2132
2133         }
2134         session->digest_length = xform->auth.digest_length;
2135         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2136                 session->iv.offset = xform->auth.iv.offset;
2137                 session->iv.length = xform->auth.iv.length;
2138         }
2139
2140         switch (xform->auth.algo) {
2141         case RTE_CRYPTO_AUTH_SHA1:
2142                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2143                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2144                 break;
2145         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2146                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2147                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2148                 break;
2149         case RTE_CRYPTO_AUTH_MD5:
2150                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2151                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2152                 break;
2153         case RTE_CRYPTO_AUTH_MD5_HMAC:
2154                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2155                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2156                 break;
2157         case RTE_CRYPTO_AUTH_SHA224:
2158                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2159                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2160                 break;
2161         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2162                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2163                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2164                 break;
2165         case RTE_CRYPTO_AUTH_SHA256:
2166                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2167                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2168                 break;
2169         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2170                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2171                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2172                 break;
2173         case RTE_CRYPTO_AUTH_SHA384:
2174                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2175                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2176                 break;
2177         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2178                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2179                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2180                 break;
2181         case RTE_CRYPTO_AUTH_SHA512:
2182                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2183                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2184                 break;
2185         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2186                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2187                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2188                 break;
2189         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2190                 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2191                 session->auth_key.algmode = OP_ALG_AAI_F9;
2192                 break;
2193         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2194                 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2195                 session->auth_key.algmode = OP_ALG_AAI_F9;
2196                 break;
2197         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2198                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2199                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2200                 break;
2201         case RTE_CRYPTO_AUTH_AES_CMAC:
2202                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2203                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2204                 break;
2205         default:
2206                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2207                               xform->auth.algo);
2208                 return -ENOTSUP;
2209         }
2210
2211         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2212                         DIR_ENC : DIR_DEC;
2213
2214         return 0;
2215 }
2216
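/* Translate a cipher+auth xform chain into session state. The
 * auth_cipher_text flag, set by dpaa_sec_set_session_parameters(),
 * selects which xform of the pair is the cipher and which the auth.
 */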
2217 static int
2218 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2219                    struct rte_crypto_sym_xform *xform,
2220                    dpaa_sec_session *session)
2221 {
2222
2223         struct rte_crypto_cipher_xform *cipher_xform;
2224         struct rte_crypto_auth_xform *auth_xform;
2225
2226         session->ctxt = DPAA_SEC_CIPHER_HASH;
2227         if (session->auth_cipher_text) {
2228                 cipher_xform = &xform->cipher;
2229                 auth_xform = &xform->next->auth;
2230         } else {
2231                 cipher_xform = &xform->next->cipher;
2232                 auth_xform = &xform->auth;
2233         }
2234
2235         /* Set IV parameters */
2236         session->iv.offset = cipher_xform->iv.offset;
2237         session->iv.length = cipher_xform->iv.length;
2238
2239         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2240                                                RTE_CACHE_LINE_SIZE);
2241         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2242                 DPAA_SEC_ERR("No Memory for cipher key");
2243                 return -ENOMEM;
2244         }
2245         session->cipher_key.length = cipher_xform->key.length;
2246         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2247                                              RTE_CACHE_LINE_SIZE);
2248         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2249                 DPAA_SEC_ERR("No Memory for auth key");
2250                 return -ENOMEM;
2251         }
2252         session->auth_key.length = auth_xform->key.length;
2253         memcpy(session->cipher_key.data, cipher_xform->key.data,
2254                cipher_xform->key.length);
2255         memcpy(session->auth_key.data, auth_xform->key.data,
2256                auth_xform->key.length);
2257
2258         session->digest_length = auth_xform->digest_length;
2259         session->auth_alg = auth_xform->algo;
2260
2261         switch (auth_xform->algo) {
2262         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2263                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2264                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2265                 break;
2266         case RTE_CRYPTO_AUTH_MD5_HMAC:
2267                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2268                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2269                 break;
2270         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2271                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2272                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2273                 break;
2274         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2275                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2276                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2277                 break;
2278         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2279                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2280                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2281                 break;
2282         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2283                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2284                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2285                 break;
2286         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2287                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2288                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2289                 break;
2290         case RTE_CRYPTO_AUTH_AES_CMAC:
2291                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2292                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2293                 break;
2294         default:
2295                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2296                               auth_xform->algo);
2297                 return -ENOTSUP;
2298         }
2299
2300         session->cipher_alg = cipher_xform->algo;
2301
2302         switch (cipher_xform->algo) {
2303         case RTE_CRYPTO_CIPHER_AES_CBC:
2304                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2305                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2306                 break;
2307         case RTE_CRYPTO_CIPHER_DES_CBC:
2308                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2309                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2310                 break;
2311         case RTE_CRYPTO_CIPHER_3DES_CBC:
2312                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2313                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2314                 break;
2315         case RTE_CRYPTO_CIPHER_AES_CTR:
2316                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2317                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2318                 break;
2319         default:
2320                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2321                               cipher_xform->algo);
2322                 return -ENOTSUP;
2323         }
2324         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2325                                 DIR_ENC : DIR_DEC;
2326         return 0;
2327 }
2328
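/* Translate an AEAD xform into session state. Only AES-GCM is
 * supported; the AAD length is kept in auth_only_len and later passed
 * to the descriptor through DPOVRD at enqueue time.
 */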
2329 static int
2330 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2331                    struct rte_crypto_sym_xform *xform,
2332                    dpaa_sec_session *session)
2333 {
2334         session->aead_alg = xform->aead.algo;
2335         session->ctxt = DPAA_SEC_AEAD;
2336         session->iv.length = xform->aead.iv.length;
2337         session->iv.offset = xform->aead.iv.offset;
2338         session->auth_only_len = xform->aead.aad_length;
2339         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2340                                              RTE_CACHE_LINE_SIZE);
2341         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2342                 DPAA_SEC_ERR("No Memory for aead key");
2343                 return -ENOMEM;
2344         }
2345         session->aead_key.length = xform->aead.key.length;
2346         session->digest_length = xform->aead.digest_length;
2347
2348         memcpy(session->aead_key.data, xform->aead.key.data,
2349                xform->aead.key.length);
2350
2351         switch (session->aead_alg) {
2352         case RTE_CRYPTO_AEAD_AES_GCM:
2353                 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2354                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2355                 break;
2356         default:
2357                 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2358                 return -ENOTSUP;
2359         }
2360
2361         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2362                         DIR_ENC : DIR_DEC;
2363
2364         return 0;
2365 }
2366
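/* Reserve a free SEC input frame queue from the device-wide pool. A
 * session needs one input FQ per lcore; dpaa_sec_detach_rxq() returns
 * the FQ to the pool when the session is cleared.
 */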
2367 static struct qman_fq *
2368 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2369 {
2370         unsigned int i;
2371
2372         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2373                 if (qi->inq_attach[i] == 0) {
2374                         qi->inq_attach[i] = 1;
2375                         return &qi->inq[i];
2376                 }
2377         }
2378         DPAA_SEC_WARN("All sessions in use, max_nb_sessions = %u",
2379                       qi->max_nb_sessions);
2379
2380         return NULL;
2381 }
2382
2383 static int
2384 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2385 {
2386         unsigned int i;
2387
2388         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2389                 if (&qi->inq[i] == fq) {
2390                         if (qman_retire_fq(fq, NULL) != 0)
2391                                 DPAA_SEC_DEBUG("Queue could not be retired\n");
2392                         qman_oos_fq(fq);
2393                         qi->inq_attach[i] = 0;
2394                         return 0;
2395                 }
2396         }
2397         return -1;
2398 }
2399
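/* Bind a session to a queue pair for the current lcore: prepare the
 * shared descriptor (CDB) and schedule the session's input FQ so that
 * SEC output is delivered to the qp's output FQ.
 */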
2400 int
2401 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2402 {
2403         int ret;
2404
2405         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2406         ret = dpaa_sec_prep_cdb(sess);
2407         if (ret) {
2408                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2409                 return ret;
2410         }
2411         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2412                 ret = rte_dpaa_portal_init((void *)0);
2413                 if (ret) {
2414                         DPAA_SEC_ERR("Failure in affining portal");
2415                         return ret;
2416                 }
2417         }
2418         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2419                                rte_dpaa_mem_vtop(&sess->cdb),
2420                                qman_fq_fqid(&qp->outq));
2421         if (ret)
2422                 DPAA_SEC_ERR("Unable to init sec queue");
2423
2424         return ret;
2425 }
2426
2427 static inline void
2428 free_session_data(dpaa_sec_session *s)
2429 {
2430         if (is_aead(s))
2431                 rte_free(s->aead_key.data);
2432         else {
2433                 rte_free(s->auth_key.data);
2434                 rte_free(s->cipher_key.data);
2435         }
2436         memset(s, 0, sizeof(dpaa_sec_session));
2437 }
2438
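/* Parse the xform chain and initialise the driver-private session.
 * Accepted shapes: cipher-only, auth-only, cipher+auth chains (encrypt
 * must be cipher-then-auth, decrypt auth-then-cipher) and AEAD. One SEC
 * input FQ is reserved per lcore under the device lock.
 */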
2439 static int
2440 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2441                             struct rte_crypto_sym_xform *xform, void *sess)
2442 {
2443         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2444         dpaa_sec_session *session = sess;
2445         uint32_t i;
2446         int ret;
2447
2448         PMD_INIT_FUNC_TRACE();
2449
2450         if (unlikely(sess == NULL)) {
2451                 DPAA_SEC_ERR("invalid session struct");
2452                 return -EINVAL;
2453         }
2454         memset(session, 0, sizeof(dpaa_sec_session));
2455
2456         /* Default IV length = 0 */
2457         session->iv.length = 0;
2458
2459         /* Cipher Only */
2460         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2461                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2462                 ret = dpaa_sec_cipher_init(dev, xform, session);
2463
2464         /* Authentication Only */
2465         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2466                    xform->next == NULL) {
2467                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2468                 session->ctxt = DPAA_SEC_AUTH;
2469                 ret = dpaa_sec_auth_init(dev, xform, session);
2470
2471         /* Cipher then Authenticate */
2472         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2473                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2474                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2475                         session->auth_cipher_text = 1;
2476                         if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2477                                 ret = dpaa_sec_auth_init(dev, xform, session);
2478                         else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2479                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2480                         else
2481                                 ret = dpaa_sec_chain_init(dev, xform, session);
2482                 } else {
2483                         DPAA_SEC_ERR("Not supported: Cipher decrypt followed by Auth");
2484                         return -ENOTSUP;
2485                 }
2486         /* Authenticate then Cipher */
2487         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2488                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2489                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2490                         session->auth_cipher_text = 0;
2491                         if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2492                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2493                         else if (xform->next->cipher.algo
2494                                         == RTE_CRYPTO_CIPHER_NULL)
2495                                 ret = dpaa_sec_auth_init(dev, xform, session);
2496                         else
2497                                 ret = dpaa_sec_chain_init(dev, xform, session);
2498                 } else {
2499                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2500                         return -ENOTSUP;
2501                 }
2502
2503         /* AEAD operation for AES-GCM kind of Algorithms */
2504         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2505                    xform->next == NULL) {
2506                 ret = dpaa_sec_aead_init(dev, xform, session);
2507
2508         } else {
2509                 DPAA_SEC_ERR("Invalid crypto type");
2510                 return -EINVAL;
2511         }
2512         if (ret) {
2513                 DPAA_SEC_ERR("unable to init session");
2514                 goto err1;
2515         }
2516
2517         rte_spinlock_lock(&internals->lock);
2518         for (i = 0; i < MAX_DPAA_CORES; i++) {
2519                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2520                 if (session->inq[i] == NULL) {
2521                         DPAA_SEC_ERR("unable to attach sec queue");
2522                         rte_spinlock_unlock(&internals->lock);
2523                         ret = -EBUSY;
2524                         goto err1;
2525                 }
2526         }
2527         rte_spinlock_unlock(&internals->lock);
2528
2529         return 0;
2530
2531 err1:
2532         free_session_data(session);
2533         return ret;
2534 }
2535
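/* Allocate the driver-private session object from the supplied session
 * mempool, fill it from the xform chain and attach it to the generic
 * cryptodev session.
 */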
2536 static int
2537 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2538                 struct rte_crypto_sym_xform *xform,
2539                 struct rte_cryptodev_sym_session *sess,
2540                 struct rte_mempool *mempool)
2541 {
2542         void *sess_private_data;
2543         int ret;
2544
2545         PMD_INIT_FUNC_TRACE();
2546
2547         if (rte_mempool_get(mempool, &sess_private_data)) {
2548                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2549                 return -ENOMEM;
2550         }
2551
2552         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2553         if (ret != 0) {
2554                 DPAA_SEC_ERR("failed to configure session parameters");
2555
2556                 /* Return session to mempool */
2557                 rte_mempool_put(mempool, sess_private_data);
2558                 return ret;
2559         }
2560
2561         set_sym_session_private_data(sess, dev->driver_id,
2562                         sess_private_data);
2563
2565         return 0;
2566 }
2567
2568 static inline void
2569 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2570 {
2571         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2572         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2573         uint8_t i;
2574
2575         for (i = 0; i < MAX_DPAA_CORES; i++) {
2576                 if (s->inq[i])
2577                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2578                 s->inq[i] = NULL;
2579                 s->qp[i] = NULL;
2580         }
2581         free_session_data(s);
2582         rte_mempool_put(sess_mp, (void *)s);
2583 }
2584
2585 /** Clear the memory of session so it doesn't leave key material behind */
2586 static void
2587 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2588                 struct rte_cryptodev_sym_session *sess)
2589 {
2590         PMD_INIT_FUNC_TRACE();
2591         uint8_t index = dev->driver_id;
2592         void *sess_priv = get_sym_session_private_data(sess, index);
2593         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2594
2595         if (sess_priv) {
2596                 free_session_memory(dev, s);
2597                 set_sym_session_private_data(sess, index, NULL);
2598         }
2599 }
2600
2601 #ifdef RTE_LIB_SECURITY
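/* Map an AEAD xform onto the IPsec protocol descriptor: pick the
 * AES-GCM variant matching the ICV length (8, 12 or 16 bytes) and copy
 * the salt into the encap/decap PDB.
 */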
2602 static int
2603 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2604                         struct rte_security_ipsec_xform *ipsec_xform,
2605                         dpaa_sec_session *session)
2606 {
2607         PMD_INIT_FUNC_TRACE();
2608
2609         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2610                                                RTE_CACHE_LINE_SIZE);
2611         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2612                 DPAA_SEC_ERR("No Memory for aead key");
2613                 return -ENOMEM;
2614         }
2615         memcpy(session->aead_key.data, aead_xform->key.data,
2616                aead_xform->key.length);
2617
2618         session->digest_length = aead_xform->digest_length;
2619         session->aead_key.length = aead_xform->key.length;
2620
2621         switch (aead_xform->algo) {
2622         case RTE_CRYPTO_AEAD_AES_GCM:
2623                 switch (session->digest_length) {
2624                 case 8:
2625                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2626                         break;
2627                 case 12:
2628                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2629                         break;
2630                 case 16:
2631                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2632                         break;
2633                 default:
2634                         DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2635                                      session->digest_length);
2636                         return -EINVAL;
2637                 }
2638                 if (session->dir == DIR_ENC) {
2639                         memcpy(session->encap_pdb.gcm.salt,
2640                                 (uint8_t *)&(ipsec_xform->salt), 4);
2641                 } else {
2642                         memcpy(session->decap_pdb.gcm.salt,
2643                                 (uint8_t *)&(ipsec_xform->salt), 4);
2644                 }
2645                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2646                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2647                 break;
2648         default:
2649                 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2650                               aead_xform->algo);
2651                 return -ENOTSUP;
2652         }
2653         return 0;
2654 }
2655
2656 static int
2657 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2658         struct rte_crypto_auth_xform *auth_xform,
2659         struct rte_security_ipsec_xform *ipsec_xform,
2660         dpaa_sec_session *session)
2661 {
2662         if (cipher_xform) {
2663                 session->cipher_key.data = rte_zmalloc(NULL,
2664                                                        cipher_xform->key.length,
2665                                                        RTE_CACHE_LINE_SIZE);
2666                 if (session->cipher_key.data == NULL &&
2667                                 cipher_xform->key.length > 0) {
2668                         DPAA_SEC_ERR("No Memory for cipher key");
2669                         return -ENOMEM;
2670                 }
2671
2672                 session->cipher_key.length = cipher_xform->key.length;
2673                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2674                                 cipher_xform->key.length);
2675                 session->cipher_alg = cipher_xform->algo;
2676         } else {
2677                 session->cipher_key.data = NULL;
2678                 session->cipher_key.length = 0;
2679                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2680         }
2681
2682         if (auth_xform) {
2683                 session->auth_key.data = rte_zmalloc(NULL,
2684                                                 auth_xform->key.length,
2685                                                 RTE_CACHE_LINE_SIZE);
2686                 if (session->auth_key.data == NULL &&
2687                                 auth_xform->key.length > 0) {
2688                         DPAA_SEC_ERR("No Memory for auth key");
2689                         return -ENOMEM;
2690                 }
2691                 session->auth_key.length = auth_xform->key.length;
2692                 memcpy(session->auth_key.data, auth_xform->key.data,
2693                                 auth_xform->key.length);
2694                 session->auth_alg = auth_xform->algo;
2695                 session->digest_length = auth_xform->digest_length;
2696         } else {
2697                 session->auth_key.data = NULL;
2698                 session->auth_key.length = 0;
2699                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2700         }
2701
2702         switch (session->auth_alg) {
2703         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2704                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2705                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2706                 break;
2707         case RTE_CRYPTO_AUTH_MD5_HMAC:
2708                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2709                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2710                 break;
2711         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2712                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2713                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2714                 if (session->digest_length != 16)
2715                         DPAA_SEC_WARN(
2716                         "Using a truncated sha256-hmac digest length is "
2717                         "non-standard; it will not work with lookaside proto");
2718                 break;
2719         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2720                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2721                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2722                 break;
2723         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2724                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2725                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2726                 break;
2727         case RTE_CRYPTO_AUTH_AES_CMAC:
2728                 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2729                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2730                 break;
2731         case RTE_CRYPTO_AUTH_NULL:
2732                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2733                 break;
2734         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2735                 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2736                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2737                 break;
2738         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2739         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2740         case RTE_CRYPTO_AUTH_SHA1:
2741         case RTE_CRYPTO_AUTH_SHA256:
2742         case RTE_CRYPTO_AUTH_SHA512:
2743         case RTE_CRYPTO_AUTH_SHA224:
2744         case RTE_CRYPTO_AUTH_SHA384:
2745         case RTE_CRYPTO_AUTH_MD5:
2746         case RTE_CRYPTO_AUTH_AES_GMAC:
2747         case RTE_CRYPTO_AUTH_KASUMI_F9:
2748         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2749         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2750                 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2751                               session->auth_alg);
2752                 return -ENOTSUP;
2753         default:
2754                 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2755                               session->auth_alg);
2756                 return -ENOTSUP;
2757         }
2758
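             /* Same mapping for the cipher: pick the OP_PCL_IPSEC_* protocol
              * constant and OP_ALG_AAI_* mode; AES-CTR additionally seeds the
              * PDB nonce/counter fields from the SA salt below.
              */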
2759         switch (session->cipher_alg) {
2760         case RTE_CRYPTO_CIPHER_AES_CBC:
2761                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2762                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2763                 break;
2764         case RTE_CRYPTO_CIPHER_DES_CBC:
2765                 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2766                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2767                 break;
2768         case RTE_CRYPTO_CIPHER_3DES_CBC:
2769                 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2770                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2771                 break;
2772         case RTE_CRYPTO_CIPHER_AES_CTR:
2773                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2774                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
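                     /* Per RFC 3686 (AES-CTR for ESP), the nonce comes from
                      * the SA salt and the per-packet block counter starts
                      * at 1.
                      */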
2775                 if (session->dir == DIR_ENC) {
2776                         session->encap_pdb.ctr.ctr_initial = 0x00000001;
2777                         session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2778                 } else {
2779                         session->decap_pdb.ctr.ctr_initial = 0x00000001;
2780                         session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2781                 }
2782                 break;
2783         case RTE_CRYPTO_CIPHER_NULL:
2784                 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2785                 break;
2786         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2787         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2788         case RTE_CRYPTO_CIPHER_3DES_ECB:
2789         case RTE_CRYPTO_CIPHER_AES_ECB:
2790         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2791                 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2792                               session->cipher_alg);
2793                 return -ENOTSUP;
2794         default:
2795                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2796                               session->cipher_alg);
2797                 return -ENOTSUP;
2798         }
2799
2800         return 0;
2801 }
2802
2803 static int
2804 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2805                            struct rte_security_session_conf *conf,
2806                            void *sess)
2807 {
2808         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2809         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2810         struct rte_crypto_auth_xform *auth_xform = NULL;
2811         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2812         struct rte_crypto_aead_xform *aead_xform = NULL;
2813         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2814         uint32_t i;
2815         int ret;
2816
2817         PMD_INIT_FUNC_TRACE();
2818
2819         memset(session, 0, sizeof(dpaa_sec_session));
2820         session->proto_alg = conf->protocol;
2821         session->ctxt = DPAA_SEC_IPSEC;
2822
2823         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2824                 session->dir = DIR_ENC;
2825         else
2826                 session->dir = DIR_DEC;
2827
2828         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2829                 cipher_xform = &conf->crypto_xform->cipher;
2830                 if (conf->crypto_xform->next)
2831                         auth_xform = &conf->crypto_xform->next->auth;
2832                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2833                                         ipsec_xform, session);
2834         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2835                 auth_xform = &conf->crypto_xform->auth;
2836                 if (conf->crypto_xform->next)
2837                         cipher_xform = &conf->crypto_xform->next->cipher;
2838                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2839                                         ipsec_xform, session);
2840         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2841                 aead_xform = &conf->crypto_xform->aead;
2842                 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2843                                         ipsec_xform, session);
2844         } else {
2845                 DPAA_SEC_ERR("XFORM not specified");
2846                 ret = -EINVAL;
2847                 goto out;
2848         }
2849         if (ret) {
2850                 DPAA_SEC_ERR("Failed to process xform");
2851                 goto out;
2852         }
2853
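             /* Egress tunnel mode: prebuild the outer IP header in the
              * session; PDBOPTS_ESP_OIHI_PDB_INL (set below) makes SEC take
              * this header from the PDB and prepend it to each packet.
              */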
2854         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2855                 if (ipsec_xform->tunnel.type ==
2856                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2857                         session->ip4_hdr.ip_v = IPVERSION;
2858                         session->ip4_hdr.ip_hl = 5;
2859                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2860                                                 sizeof(session->ip4_hdr));
2861                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2862                         session->ip4_hdr.ip_id = 0;
2863                         session->ip4_hdr.ip_off = 0;
2864                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2865                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2866                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2867                                         IPPROTO_ESP : IPPROTO_AH;
2868                         session->ip4_hdr.ip_sum = 0;
2869                         session->ip4_hdr.ip_src =
2870                                         ipsec_xform->tunnel.ipv4.src_ip;
2871                         session->ip4_hdr.ip_dst =
2872                                         ipsec_xform->tunnel.ipv4.dst_ip;
2873                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2874                                                 (void *)&session->ip4_hdr,
2875                                                 sizeof(struct ip));
2876                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2877                 } else if (ipsec_xform->tunnel.type ==
2878                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2879                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2880                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2881                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2882                                         RTE_IPV6_HDR_TC_SHIFT) &
2883                                         RTE_IPV6_HDR_TC_MASK) |
2884                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2885                                         RTE_IPV6_HDR_FL_SHIFT) &
2886                                         RTE_IPV6_HDR_FL_MASK));
2887                         /* Payload length will be updated by HW */
2888                         session->ip6_hdr.payload_len = 0;
2889                         session->ip6_hdr.hop_limits =
2890                                         ipsec_xform->tunnel.ipv6.hlimit;
2891                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2892                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2893                                         IPPROTO_ESP : IPPROTO_AH;
2894                         memcpy(&session->ip6_hdr.src_addr,
2895                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2896                         memcpy(&session->ip6_hdr.dst_addr,
2897                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2898                         session->encap_pdb.ip_hdr_len =
2899                                                 sizeof(struct rte_ipv6_hdr);
2900                 }
2901                 session->encap_pdb.options =
2902                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2903                         PDBOPTS_ESP_OIHI_PDB_INL |
2904                         PDBOPTS_ESP_IVSRC |
2905                         PDBHMO_ESP_ENCAP_DTTL |
2906                         PDBHMO_ESP_SNR;
2907                 if (ipsec_xform->options.esn)
2908                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2909                 session->encap_pdb.spi = ipsec_xform->spi;
2910
2911         } else if (ipsec_xform->direction ==
2912                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2913                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2914                         session->decap_pdb.options = sizeof(struct ip) << 16;
2915                 else
2916                         session->decap_pdb.options =
2917                                         sizeof(struct rte_ipv6_hdr) << 16;
2918                 if (ipsec_xform->options.esn)
2919                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2920                 if (ipsec_xform->replay_win_sz) {
2921                         uint32_t win_sz;
2922                         win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2923
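                             /* SEC implements 32-, 64- and 128-entry
                              * anti-replay windows; round the requested
                              * size up to a power of two and use the
                              * smallest window that covers it.
                              */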
2924                         switch (win_sz) {
2925                         case 1:
2926                         case 2:
2927                         case 4:
2928                         case 8:
2929                         case 16:
2930                         case 32:
2931                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2932                                 break;
2933                         case 64:
2934                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2935                                 break;
2936                         default:
2937                                 session->decap_pdb.options |=
2938                                                         PDBOPTS_ESP_ARS128;
2939                         }
2940                 }
2941         } else
2942                 goto out;
2943         rte_spinlock_lock(&internals->lock);
2944         for (i = 0; i < MAX_DPAA_CORES; i++) {
2945                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2946                 if (session->inq[i] == NULL) {
2947                         DPAA_SEC_ERR("unable to attach sec queue");
2948                         rte_spinlock_unlock(&internals->lock);
2949                         goto out;
2950                 }
2951         }
2952         rte_spinlock_unlock(&internals->lock);
2953
2954         return 0;
2955 out:
2956         free_session_data(session);
2957         return -1;
2958 }
2959
2960 static int
2961 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2962                           struct rte_security_session_conf *conf,
2963                           void *sess)
2964 {
2965         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2966         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2967         struct rte_crypto_auth_xform *auth_xform = NULL;
2968         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2969         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2970         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2971         uint32_t i;
2972         int ret;
2973
2974         PMD_INIT_FUNC_TRACE();
2975
2976         memset(session, 0, sizeof(dpaa_sec_session));
2977
2978         /* find xfrm types */
2979         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2980                 cipher_xform = &xform->cipher;
2981                 if (xform->next != NULL)
2982                         auth_xform = &xform->next->auth;
2983         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2984                 auth_xform = &xform->auth;
2985                 if (xform->next != NULL)
2986                         cipher_xform = &xform->next->cipher;
2987         } else {
2988                 DPAA_SEC_ERR("Invalid crypto type");
2989                 return -EINVAL;
2990         }
2991
2992         session->proto_alg = conf->protocol;
2993         session->ctxt = DPAA_SEC_PDCP;
2994
2995         if (cipher_xform) {
2996                 switch (cipher_xform->algo) {
2997                 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2998                         session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2999                         break;
3000                 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3001                         session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3002                         break;
3003                 case RTE_CRYPTO_CIPHER_AES_CTR:
3004                         session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3005                         break;
3006                 case RTE_CRYPTO_CIPHER_NULL:
3007                         session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3008                         break;
3009                 default:
3010                         DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3011                                       cipher_xform->algo);
3012                         return -EINVAL;
3013                 }
3014
3015                 session->cipher_key.data = rte_zmalloc(NULL,
3016                                                cipher_xform->key.length,
3017                                                RTE_CACHE_LINE_SIZE);
3018                 if (session->cipher_key.data == NULL &&
3019                                 cipher_xform->key.length > 0) {
3020                         DPAA_SEC_ERR("No Memory for cipher key");
3021                         return -ENOMEM;
3022                 }
3023                 session->cipher_key.length = cipher_xform->key.length;
3024                 memcpy(session->cipher_key.data, cipher_xform->key.data,
3025                         cipher_xform->key.length);
3026                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3027                                         DIR_ENC : DIR_DEC;
3028                 session->cipher_alg = cipher_xform->algo;
3029         } else {
3030                 session->cipher_key.data = NULL;
3031                 session->cipher_key.length = 0;
3032                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3033                 session->dir = DIR_ENC;
3034         }
3035
3036         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3037                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3038                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3039                         DPAA_SEC_ERR(
3040                                 "PDCP Seq Num size should be 5/12 bits for cmode");
3041                         ret = -EINVAL;
3042                         goto out;
3043                 }
3044         }
3045
3046         if (auth_xform) {
3047                 switch (auth_xform->algo) {
3048                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3049                         session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3050                         break;
3051                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3052                         session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3053                         break;
3054                 case RTE_CRYPTO_AUTH_AES_CMAC:
3055                         session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3056                         break;
3057                 case RTE_CRYPTO_AUTH_NULL:
3058                         session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3059                         break;
3060                 default:
3061                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3062                                       auth_xform->algo);
3063                         rte_free(session->cipher_key.data);
3064                         return -EINVAL;
3065                 }
3066                 session->auth_key.data = rte_zmalloc(NULL,
3067                                                      auth_xform->key.length,
3068                                                      RTE_CACHE_LINE_SIZE);
3069                 if (!session->auth_key.data &&
3070                     auth_xform->key.length > 0) {
3071                         DPAA_SEC_ERR("No Memory for auth key");
3072                         rte_free(session->cipher_key.data);
3073                         return -ENOMEM;
3074                 }
3075                 session->auth_key.length = auth_xform->key.length;
3076                 memcpy(session->auth_key.data, auth_xform->key.data,
3077                        auth_xform->key.length);
3078                 session->auth_alg = auth_xform->algo;
3079         } else {
3080                 session->auth_key.data = NULL;
3081                 session->auth_key.length = 0;
3082                 session->auth_alg = 0;
3083         }
3084         session->pdcp.domain = pdcp_xform->domain;
3085         session->pdcp.bearer = pdcp_xform->bearer;
3086         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3087         session->pdcp.sn_size = pdcp_xform->sn_size;
3088         session->pdcp.hfn = pdcp_xform->hfn;
3089         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3090         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3091         session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
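             /* When hfn_ovrd is enabled, the application is expected to
              * supply the HFN per operation at the crypto op's IV offset,
              * so record that offset for the descriptor.
              */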
3092         if (cipher_xform)
3093                 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3094
3095         rte_spinlock_lock(&dev_priv->lock);
3096         for (i = 0; i < MAX_DPAA_CORES; i++) {
3097                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3098                 if (session->inq[i] == NULL) {
3099                         DPAA_SEC_ERR("unable to attach sec queue");
3100                         rte_spinlock_unlock(&dev_priv->lock);
3101                         ret = -EBUSY;
3102                         goto out;
3103                 }
3104         }
3105         rte_spinlock_unlock(&dev_priv->lock);
3106         return 0;
3107 out:
3108         rte_free(session->auth_key.data);
3109         rte_free(session->cipher_key.data);
3110         memset(session, 0, sizeof(dpaa_sec_session));
3111         return ret;
3112 }
3113
3114 static int
3115 dpaa_sec_security_session_create(void *dev,
3116                                  struct rte_security_session_conf *conf,
3117                                  struct rte_security_session *sess,
3118                                  struct rte_mempool *mempool)
3119 {
3120         void *sess_private_data;
3121         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3122         int ret;
3123
3124         if (rte_mempool_get(mempool, &sess_private_data)) {
3125                 DPAA_SEC_ERR("Couldn't get object from session mempool");
3126                 return -ENOMEM;
3127         }
3128
3129         switch (conf->protocol) {
3130         case RTE_SECURITY_PROTOCOL_IPSEC:
3131                 ret = dpaa_sec_set_ipsec_session(cdev, conf, sess_private_data);
3132                 break;
3133         case RTE_SECURITY_PROTOCOL_PDCP:
3134                 ret = dpaa_sec_set_pdcp_session(cdev, conf, sess_private_data);
3135                 break;
3136         case RTE_SECURITY_PROTOCOL_MACSEC:
3137                 ret = -ENOTSUP;
3138                 break;
3139         default:
3140                 ret = -EINVAL;
3141                 break;
3142         }
3143         if (ret != 0) {
3144                 DPAA_SEC_ERR("failed to configure session parameters");
3145                 /* Return session to mempool */
3146                 rte_mempool_put(mempool, sess_private_data);
3147                 return ret;
3148         }
3149
3150         set_sec_session_private_data(sess, sess_private_data);
3151
3152         return ret;
3153 }
3154
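     /*
      * Illustrative usage sketch (not part of this driver): an application
      * reaches the entry point above through the rte_security API, e.g.
      *
      *     void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
      *     struct rte_security_session_conf conf = {
      *             .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
      *             .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
      *             .ipsec = ipsec_sa_params,
      *             .crypto_xform = &cipher_auth_chain,
      *     };
      *     sess = rte_security_session_create(sec_ctx, &conf, mp, priv_mp);
      *
      * The variable names and the exact rte_security_session_create()
      * prototype (which varies across DPDK releases) are assumptions here.
      */
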
3155 /** Clear the memory of session so it doesn't leave key material behind */
3156 static int
3157 dpaa_sec_security_session_destroy(void *dev,
3158                 struct rte_security_session *sess)
3159 {
3160         void *sess_priv = get_sec_session_private_data(sess);
3161         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3162         PMD_INIT_FUNC_TRACE();
3163
3164         if (sess_priv) {
3165                 free_session_memory((struct rte_cryptodev *)dev, s);
3166                 set_sec_session_private_data(sess, NULL);
3167         }
3168         return 0;
3169 }
3170 #endif
3171 static int
3172 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3173                        struct rte_cryptodev_config *config __rte_unused)
3174 {
3175         PMD_INIT_FUNC_TRACE();
3176
3177         return 0;
3178 }
3179
3180 static int
3181 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3182 {
3183         PMD_INIT_FUNC_TRACE();
3184         return 0;
3185 }
3186
3187 static void
3188 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3189 {
3190         PMD_INIT_FUNC_TRACE();
3191 }
3192
3193 static int
3194 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3195 {
3196         PMD_INIT_FUNC_TRACE();
3197
3198         if (dev == NULL)
3199                 return -ENODEV;
3200
3201         return 0;
3202 }
3203
3204 static void
3205 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3206                        struct rte_cryptodev_info *info)
3207 {
3208         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3209
3210         PMD_INIT_FUNC_TRACE();
3211         if (info != NULL) {
3212                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3213                 info->feature_flags = dev->feature_flags;
3214                 info->capabilities = dpaa_sec_capabilities;
3215                 info->sym.max_nb_sessions = internals->max_nb_sessions;
3216                 info->driver_id = dpaa_cryptodev_driver_id;
3217         }
3218 }
3219
3220 static enum qman_cb_dqrr_result
3221 dpaa_sec_process_parallel_event(void *event,
3222                         struct qman_portal *qm __always_unused,
3223                         struct qman_fq *outq,
3224                         const struct qm_dqrr_entry *dqrr,
3225                         void **bufs)
3226 {
3227         const struct qm_fd *fd;
3228         struct dpaa_sec_job *job;
3229         struct dpaa_sec_op_ctx *ctx;
3230         struct rte_event *ev = (struct rte_event *)event;
3231
3232         fd = &dqrr->fd;
3233
3234         /* The scatter-gather table is embedded in the op ctx:
3235          * sg[0] is the output entry,
3236          * sg[1] is the input entry.
3237          */
3238         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3239
3240         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3241         ctx->fd_status = fd->status;
3242         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3243                 struct qm_sg_entry *sg_out;
3244                 uint32_t len;
3245
3246                 sg_out = &job->sg[0];
3247                 hw_sg_to_cpu(sg_out);
3248                 len = sg_out->length;
3249                 ctx->op->sym->m_src->pkt_len = len;
3250                 ctx->op->sym->m_src->data_len = len;
3251         }
3252         if (!ctx->fd_status) {
3253                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3254         } else {
3255                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3256                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3257         }
3258         ev->event_ptr = (void *)ctx->op;
3259
3260         ev->flow_id = outq->ev.flow_id;
3261         ev->sub_event_type = outq->ev.sub_event_type;
3262         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3263         ev->op = RTE_EVENT_OP_NEW;
3264         ev->sched_type = outq->ev.sched_type;
3265         ev->queue_id = outq->ev.queue_id;
3266         ev->priority = outq->ev.priority;
3267         *bufs = (void *)ctx->op;
3268
3269         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3270
3271         return qman_cb_dqrr_consume;
3272 }
3273
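     /*
      * Atomic variant of the dequeue callback: the FD-to-crypto-op
      * translation is identical to the parallel handler above, but the
      * DQRR entry is held (qman_cb_dqrr_defer) and only consumed once the
      * application finishes the event, preserving per-flow atomicity.
      */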
3274 static enum qman_cb_dqrr_result
3275 dpaa_sec_process_atomic_event(void *event,
3276                         struct qman_portal *qm __rte_unused,
3277                         struct qman_fq *outq,
3278                         const struct qm_dqrr_entry *dqrr,
3279                         void **bufs)
3280 {
3281         u8 index;
3282         const struct qm_fd *fd;
3283         struct dpaa_sec_job *job;
3284         struct dpaa_sec_op_ctx *ctx;
3285         struct rte_event *ev = (struct rte_event *)event;
3286
3287         fd = &dqrr->fd;
3288
3289         /* The scatter-gather table is embedded in the op ctx:
3290          * sg[0] is the output entry,
3291          * sg[1] is the input entry.
3292          */
3293         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3294
3295         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3296         ctx->fd_status = fd->status;
3297         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3298                 struct qm_sg_entry *sg_out;
3299                 uint32_t len;
3300
3301                 sg_out = &job->sg[0];
3302                 hw_sg_to_cpu(sg_out);
3303                 len = sg_out->length;
3304                 ctx->op->sym->m_src->pkt_len = len;
3305                 ctx->op->sym->m_src->data_len = len;
3306         }
3307         if (!ctx->fd_status) {
3308                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3309         } else {
3310                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3311                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3312         }
3313         ev->event_ptr = (void *)ctx->op;
3314         ev->flow_id = outq->ev.flow_id;
3315         ev->sub_event_type = outq->ev.sub_event_type;
3316         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3317         ev->op = RTE_EVENT_OP_NEW;
3318         ev->sched_type = outq->ev.sched_type;
3319         ev->queue_id = outq->ev.queue_id;
3320         ev->priority = outq->ev.priority;
3321
3322         /* Save active dqrr entries */
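             /* DQRR entries are 64 B apart in the 16-entry portal ring, so
              * the entry address encodes its ring index; the held bitmap is
              * cleared later, when the application releases the event.
              */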
3323         index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3324         DPAA_PER_LCORE_DQRR_SIZE++;
3325         DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3326         DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3327         ev->impl_opaque = index + 1;
3328         *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3329         *bufs = (void *)ctx->op;
3330
3331         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3332
3333         return qman_cb_dqrr_defer;
3334 }
3335
3336 int
3337 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3338                 int qp_id,
3339                 uint16_t ch_id,
3340                 const struct rte_event *event)
3341 {
3342         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3343         struct qm_mcc_initfq opts = {0};
3344
3345         int ret;
3346
3347         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3348                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3349         opts.fqd.dest.channel = ch_id;
3350
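             /* Map the eventdev scheduling type onto FQ controls: atomic
              * flows use HOLDACTIVE so a flow's entries stay with one core
              * until released; other types use AVOIDBLOCK for unserialised,
              * parallel dispatch.
              */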
3351         switch (event->sched_type) {
3352         case RTE_SCHED_TYPE_ATOMIC:
3353                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3354                 /* Clear the FQCTRL_AVOIDBLOCK bit, which is not a
3355                  * valid configuration together with HOLD_ACTIVE.
3356                  */
3357                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3358                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3359                 break;
3360         case RTE_SCHED_TYPE_ORDERED:
3361                 DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3362                 return -ENOTSUP;
3363         default:
3364                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3365                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3366                 break;
3367         }
3368
3369         ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3370         if (unlikely(ret)) {
3371                 DPAA_SEC_ERR("unable to init caam source fq!");
3372                 return ret;
3373         }
3374
3375         memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3376
3377         return 0;
3378 }
3379
3380 int
3381 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3382                         int qp_id)
3383 {
3384         struct qm_mcc_initfq opts = {0};
3385         int ret;
3386         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3387
3388         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3389                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3390         qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3391         qp->outq.cb.ern  = ern_sec_fq_handler;
3392         qman_retire_fq(&qp->outq, NULL);
3393         qman_oos_fq(&qp->outq);
3394         ret = qman_init_fq(&qp->outq, 0, &opts);
3395         if (ret)
3396                 DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
3397         qp->outq.cb.dqrr = NULL;
3398
3399         return ret;
3400 }
3401
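     /*
      * Note: the two hooks above are expected to be driven by the DPAA
      * eventdev when a crypto queue pair is added to or removed from an
      * event crypto adapter (e.g. rte_event_crypto_adapter_queue_pair_add());
      * the adapter supplies the channel id and rte_event used here. The
      * exact call chain is an assumption, not shown in this file.
      */
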
3402 static struct rte_cryptodev_ops crypto_ops = {
3403         .dev_configure        = dpaa_sec_dev_configure,
3404         .dev_start            = dpaa_sec_dev_start,
3405         .dev_stop             = dpaa_sec_dev_stop,
3406         .dev_close            = dpaa_sec_dev_close,
3407         .dev_infos_get        = dpaa_sec_dev_infos_get,
3408         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
3409         .queue_pair_release   = dpaa_sec_queue_pair_release,
3410         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
3411         .sym_session_configure    = dpaa_sec_sym_session_configure,
3412         .sym_session_clear        = dpaa_sec_sym_session_clear,
3413         /* Raw data-path API related operations */
3414         .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3415         .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3416 };
3417
3418 #ifdef RTE_LIB_SECURITY
3419 static const struct rte_security_capability *
3420 dpaa_sec_capabilities_get(void *device __rte_unused)
3421 {
3422         return dpaa_sec_security_cap;
3423 }
3424
3425 static const struct rte_security_ops dpaa_sec_security_ops = {
3426         .session_create = dpaa_sec_security_session_create,
3427         .session_update = NULL,
3428         .session_stats_get = NULL,
3429         .session_destroy = dpaa_sec_security_session_destroy,
3430         .set_pkt_metadata = NULL,
3431         .capabilities_get = dpaa_sec_capabilities_get
3432 };
3433 #endif
3434 static int
3435 dpaa_sec_uninit(struct rte_cryptodev *dev)
3436 {
3437         struct dpaa_sec_dev_private *internals;
3438
3439         if (dev == NULL)
3440                 return -ENODEV;
3441
3442         internals = dev->data->dev_private;
3443         rte_free(dev->security_ctx);
3444
3445         rte_free(internals);
3446
3447         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3448                       dev->data->name, rte_socket_id());
3449
3450         return 0;
3451 }
3452
3453 static int
3454 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3455 {
3456         struct dpaa_sec_dev_private *internals;
3457 #ifdef RTE_LIB_SECURITY
3458         struct rte_security_ctx *security_instance;
3459 #endif
3460         struct dpaa_sec_qp *qp;
3461         uint32_t i, flags;
3462         int ret;
3463
3464         PMD_INIT_FUNC_TRACE();
3465
3466         cryptodev->driver_id = dpaa_cryptodev_driver_id;
3467         cryptodev->dev_ops = &crypto_ops;
3468
3469         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3470         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3471         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3472                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3473                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3474                         RTE_CRYPTODEV_FF_SECURITY |
3475                         RTE_CRYPTODEV_FF_SYM_RAW_DP |
3476                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3477                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3478                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3479                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3480                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3481
3482         internals = cryptodev->data->dev_private;
3483         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3484         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3485
3486         /*
3487          * For secondary processes, we don't initialise any further as the
3488          * primary has already done this work; the burst function pointers
3489          * assigned above are all a secondary process needs.
3490          */
3491         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3492                 DPAA_SEC_WARN("Device already initialized by primary process");
3493                 return 0;
3494         }
3495 #ifdef RTE_LIB_SECURITY
3496         /* Initialize security_ctx only for primary process */
3497         security_instance = rte_malloc("rte_security_instances_ops",
3498                                 sizeof(struct rte_security_ctx), 0);
3499         if (security_instance == NULL)
3500                 return -ENOMEM;
3501         security_instance->device = (void *)cryptodev;
3502         security_instance->ops = &dpaa_sec_security_ops;
3503         security_instance->sess_cnt = 0;
3504         cryptodev->security_ctx = security_instance;
3505 #endif
3506         rte_spinlock_init(&internals->lock);
3507         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3508                 /* init qman fq for queue pair */
3509                 qp = &internals->qps[i];
3510                 ret = dpaa_sec_init_tx(&qp->outq);
3511                 if (ret) {
3512                         DPAA_SEC_ERR("config tx of queue pair %d failed", i);
3513                         goto init_error;
3514                 }
3515         }
3516
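             /* Session Rx FQs: dynamically-allocated FQIDs that enqueue
              * toward the SEC direct-connect portal (TO_DCPORTAL); they are
              * attached to sessions later via dpaa_sec_attach_rxq().
              */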
3517         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3518                 QMAN_FQ_FLAG_TO_DCPORTAL;
3519         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3520                 /* create rx qman fq for sessions */
3521                 ret = qman_create_fq(0, flags, &internals->inq[i]);
3522                 if (unlikely(ret != 0)) {
3523                         DPAA_SEC_ERR("sec qman_create_fq failed");
3524                         goto init_error;
3525                 }
3526         }
3527
3528         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3529         return 0;
3530
3531 init_error:
3532         DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3533
3534         rte_free(cryptodev->security_ctx);
3535         return -EFAULT;
3536 }
3537
3538 static int
3539 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3540                                 struct rte_dpaa_device *dpaa_dev)
3541 {
3542         struct rte_cryptodev *cryptodev;
3543         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3544
3545         int retval;
3546
3547         snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3548
3549         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3550         if (cryptodev == NULL)
3551                 return -ENOMEM;
3552
3553         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3554                 cryptodev->data->dev_private = rte_zmalloc_socket(
3555                                         "cryptodev private structure",
3556                                         sizeof(struct dpaa_sec_dev_private),
3557                                         RTE_CACHE_LINE_SIZE,
3558                                         rte_socket_id());
3559
3560                 if (cryptodev->data->dev_private == NULL)
3561                         rte_panic("Cannot allocate memory for private "
3562                                         "device data");
3563         }
3564
3565         dpaa_dev->crypto_dev = cryptodev;
3566         cryptodev->device = &dpaa_dev->device;
3567
3568         /* init user callbacks */
3569         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3570
3571         /* If the SEC era is not already configured, read it from the device tree */
3572         if (!rta_get_sec_era()) {
3573                 const struct device_node *caam_node;
3574
3575                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3576                         const uint32_t *prop = of_get_property(caam_node,
3577                                         "fsl,sec-era",
3578                                         NULL);
3579                         if (prop) {
3580                                 rta_set_sec_era(
3581                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3582                                 break;
3583                         }
3584                 }
3585         }
3586
3587         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3588                 retval = rte_dpaa_portal_init((void *)1);
3589                 if (retval) {
3590                         DPAA_SEC_ERR("Unable to initialize portal");
3591                         goto out;
3592                 }
3593         }
3594
3595         /* Invoke PMD device initialization function */
3596         retval = dpaa_sec_dev_init(cryptodev);
3597         if (retval == 0) {
3598                 rte_cryptodev_pmd_probing_finish(cryptodev);
3599                 return 0;
3600         }
3601
3602         retval = -ENXIO;
3603 out:
3604         /* On error, clean up and release the device */
3605         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3606                 rte_free(cryptodev->data->dev_private);
3607
3608         rte_cryptodev_pmd_release_device(cryptodev);
3609
3610         return retval;
3611 }
3612
3613 static int
3614 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3615 {
3616         struct rte_cryptodev *cryptodev;
3617         int ret;
3618
3619         cryptodev = dpaa_dev->crypto_dev;
3620         if (cryptodev == NULL)
3621                 return -ENODEV;
3622
3623         ret = dpaa_sec_uninit(cryptodev);
3624         if (ret)
3625                 return ret;
3626
3627         return rte_cryptodev_pmd_destroy(cryptodev);
3628 }
3629
3630 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3631         .drv_type = FSL_DPAA_CRYPTO,
3632         .driver = {
3633                 .name = "DPAA SEC PMD"
3634         },
3635         .probe = cryptodev_dpaa_sec_probe,
3636         .remove = cryptodev_dpaa_sec_remove,
3637 };
3638
3639 static struct cryptodev_driver dpaa_sec_crypto_drv;
3640
3641 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3642 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3643                 dpaa_cryptodev_driver_id);
3644 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);