common/dpaax: remove outdated caamflib code
[dpdk.git] drivers/crypto/dpaa_sec/dpaa_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2021 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_ip.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

uint8_t dpaa_cryptodev_driver_id;

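/* Translate the SEC frame descriptor status into the crypto op status. */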
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

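/*
 * Allocate a per-op context (job descriptor plus SG table) from the
 * ctx_pool of the queue pair bound to the current lcore.
 */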
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear the SG memory. There are 16 SG entries of 16 bytes each,
	 * and one call to dcbz_64() clears 64 bytes, so four calls clear
	 * all the SG entries. dpaa_sec_alloc_ctx() is called for every
	 * packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

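/* Enqueue-rejection (ERN) callback: log frames rejected by QMan. */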
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as destination so that
 * all packets in this queue can be dispatched to the CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Frames enqueued on in_fq are processed by the CAAM, which puts the
 * crypto result on out_fq.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* The SG table is embedded in an op ctx:
	 * sg[0] is for output,
	 * sg[1] is for input.
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* CAAM results are put into this queue. */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
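/* Prepare the PDCP command block (shared descriptor) for the session. */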
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

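	/*
	 * Depending on the descriptor space available, some keys may not
	 * fit inline in the shared descriptor and must be referenced by
	 * physical address instead.
	 */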
	if (ses->pdcp.sdap_enabled) {
		int nb_keys_to_inline =
				rta_inline_pdcp_sdap_query(authdata.algtype,
					cipherdata.algtype,
					ses->pdcp.sn_size,
					ses->pdcp.hfn_ovd);
		if (nb_keys_to_inline >= 1) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		if (nb_keys_to_inline >= 2) {
			authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;
		}
	} else {
		if (rta_inline_pdcp_query(authdata.algtype,
					cipherdata.algtype,
					ses->pdcp.sn_size,
					ses->pdcp.hfn_ovd)) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata);
	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
						     1, swap, &authdata);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
		}
	}
	return shared_desc_len;
}

/* Prepare the IPsec protocol command block for the session. */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

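	/*
	 * Stash the key lengths in the first two descriptor words so that
	 * rta_inline_query() can decide, per key, whether it fits inline
	 * in the shared descriptor. The result is a bitmask in sh_desc[2]:
	 * bit 0 set means the cipher key can be inlined, bit 1 the auth key.
	 */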
	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
/* Prepare the command block (CDB) for the session. */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5:
		case RTE_CRYPTO_AUTH_SHA1:
		case RTE_CRYPTO_AUTH_SHA224:
		case RTE_CRYPTO_AUTH_SHA256:
		case RTE_CRYPTO_AUTH_SHA384:
		case RTE_CRYPTO_AUTH_SHA512:
			shared_desc_len = cnstr_shdsc_hash(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			shared_desc_len = cnstr_shdsc_aes_mac(
						cdb->sh_desc,
						true, swap, SHR_NEVER,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("unsupported AEAD alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* auth_only_len is set to 0 here; it is overwritten in the
		 * FD for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* The qp is lockless; it must be accessed by only one thread. */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we set QM_VDQCR_EXACT
	 * and get exactly the number of buffers requested. Without the
	 * QM_VDQCR_EXACT flag, QMan may return up to two more buffers
	 * than requested, so in that case we ask for two fewer.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* The SG table is embedded in an op ctx:
		 * sg[0] is for output,
		 * sg[1] is for input.
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
						op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* The op status has been reported; free the ctx back to
		 * its mempool.
		 */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

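/*
 * Build a SEC job for an auth-only op on a scatter-gather mbuf.
 * The output entry sg[0] carries the digest; sg[1] is an extension
 * pointing at the input chain (optional IV, the data segments and,
 * for decode, the expected digest for in-HW verification).
 */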
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

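/* Contiguous-mbuf variant of build_auth_only_sg(). */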
/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* save the expected digest; HW will verify against it */
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

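/*
 * Build a SEC job for a cipher-only op on a scatter-gather mbuf:
 * sg[0] is an extension over the output segments; sg[1] is an
 * extension over the IV followed by the input segments.
 */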
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

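/*
 * Build a SEC job for a cipher-only op on a contiguous mbuf: a 4-entry
 * SG table suffices (output data, input extension, IV, input data).
 */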
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

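/*
 * Build a SEC job for an AEAD (GCM) op on a scatter-gather mbuf. The
 * input chain is IV + optional AAD + data (+ expected digest for
 * decode); the output chain is the data (+ digest for encode).
 */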
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

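/* Contiguous-mbuf variant of build_cipher_auth_gcm_sg(). */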
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

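/*
 * Build a SEC job for a chained cipher+auth op on a scatter-gather
 * mbuf. The digest is appended to the output chain on encode and
 * verified in HW on decode.
 */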
1385 static inline struct dpaa_sec_job *
1386 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1387 {
1388         struct rte_crypto_sym_op *sym = op->sym;
1389         struct dpaa_sec_job *cf;
1390         struct dpaa_sec_op_ctx *ctx;
1391         struct qm_sg_entry *sg, *out_sg, *in_sg;
1392         struct rte_mbuf *mbuf;
1393         uint8_t req_segs;
1394         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1395                         ses->iv.offset);
1396
1397         if (sym->m_dst) {
1398                 mbuf = sym->m_dst;
1399                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1400         } else {
1401                 mbuf = sym->m_src;
1402                 req_segs = mbuf->nb_segs * 2 + 4;
1403         }
1404
1405         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1406                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1407                                 MAX_SG_ENTRIES);
1408                 return NULL;
1409         }
1410
1411         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1412         if (!ctx)
1413                 return NULL;
1414
1415         cf = &ctx->job;
1416         ctx->op = op;
1417
1418         rte_prefetch0(cf->sg);
1419
1420         /* output */
1421         out_sg = &cf->sg[0];
1422         out_sg->extension = 1;
1423         if (is_encode(ses))
1424                 out_sg->length = sym->auth.data.length + ses->digest_length;
1425         else
1426                 out_sg->length = sym->auth.data.length;
1427
1428         /* output sg entries */
1429         sg = &cf->sg[2];
1430         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1431         cpu_to_hw_sg(out_sg);
1432
1433         /* 1st seg */
1434         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1435         sg->length = mbuf->data_len - sym->auth.data.offset;
1436         sg->offset = sym->auth.data.offset;
1437
1438         /* Successive segs */
1439         mbuf = mbuf->next;
1440         while (mbuf) {
1441                 cpu_to_hw_sg(sg);
1442                 sg++;
1443                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1444                 sg->length = mbuf->data_len;
1445                 mbuf = mbuf->next;
1446         }
1447         sg->length -= ses->digest_length;
1448
1449         if (is_encode(ses)) {
1450                 cpu_to_hw_sg(sg);
1451                 /* set auth output */
1452                 sg++;
1453                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1454                 sg->length = ses->digest_length;
1455         }
1456         sg->final = 1;
1457         cpu_to_hw_sg(sg);
1458
1459         /* input */
1460         mbuf = sym->m_src;
1461         in_sg = &cf->sg[1];
1462         in_sg->extension = 1;
1463         in_sg->final = 1;
1464         if (is_encode(ses))
1465                 in_sg->length = ses->iv.length + sym->auth.data.length;
1466         else
1467                 in_sg->length = ses->iv.length + sym->auth.data.length
1468                                                 + ses->digest_length;
1469
1470         /* input sg entries */
1471         sg++;
1472         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1473         cpu_to_hw_sg(in_sg);
1474
1475         /* 1st seg IV */
1476         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1477         sg->length = ses->iv.length;
1478         cpu_to_hw_sg(sg);
1479
1480         /* 2nd seg */
1481         sg++;
1482         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1483         sg->length = mbuf->data_len - sym->auth.data.offset;
1484         sg->offset = sym->auth.data.offset;
1485
1486         /* Successive segs */
1487         mbuf = mbuf->next;
1488         while (mbuf) {
1489                 cpu_to_hw_sg(sg);
1490                 sg++;
1491                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1492                 sg->length = mbuf->data_len;
1493                 mbuf = mbuf->next;
1494         }
1495
1496         sg->length -= ses->digest_length;
1497         if (is_decode(ses)) {
1498                 cpu_to_hw_sg(sg);
1499                 sg++;
1500                 memcpy(ctx->digest, sym->auth.digest.data,
1501                         ses->digest_length);
1502                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1503                 sg->length = ses->digest_length;
1504         }
1505         sg->final = 1;
1506         cpu_to_hw_sg(sg);
1507
1508         return cf;
1509 }
1510
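/*
 * Contiguous (single-segment) cipher+auth path: the input frame is laid
 * out as [IV | auth region | digest (decrypt only)] and the output frame
 * as [cipher region | digest (encrypt only)], using the seven SG entries
 * reserved via dpaa_sec_alloc_ctx(ses, 7) below.
 */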
1511 static inline struct dpaa_sec_job *
1512 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1513 {
1514         struct rte_crypto_sym_op *sym = op->sym;
1515         struct dpaa_sec_job *cf;
1516         struct dpaa_sec_op_ctx *ctx;
1517         struct qm_sg_entry *sg;
1518         rte_iova_t src_start_addr, dst_start_addr;
1519         uint32_t length = 0;
1520         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1521                         ses->iv.offset);
1522
1523         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1524         if (sym->m_dst)
1525                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1526         else
1527                 dst_start_addr = src_start_addr;
1528
1529         ctx = dpaa_sec_alloc_ctx(ses, 7);
1530         if (!ctx)
1531                 return NULL;
1532
1533         cf = &ctx->job;
1534         ctx->op = op;
1535
1536         /* input */
1537         rte_prefetch0(cf->sg);
1538         sg = &cf->sg[2];
1539         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1540         if (is_encode(ses)) {
1541                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1542                 sg->length = ses->iv.length;
1543                 length += sg->length;
1544                 cpu_to_hw_sg(sg);
1545
1546                 sg++;
1547                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1548                 sg->length = sym->auth.data.length;
1549                 length += sg->length;
1550                 sg->final = 1;
1551                 cpu_to_hw_sg(sg);
1552         } else {
1553                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1554                 sg->length = ses->iv.length;
1555                 length += sg->length;
1556                 cpu_to_hw_sg(sg);
1557
1558                 sg++;
1559
1560                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1561                 sg->length = sym->auth.data.length;
1562                 length += sg->length;
1563                 cpu_to_hw_sg(sg);
1564
1565                 memcpy(ctx->digest, sym->auth.digest.data,
1566                        ses->digest_length);
1567                 sg++;
1568
1569                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1570                 sg->length = ses->digest_length;
1571                 length += sg->length;
1572                 sg->final = 1;
1573                 cpu_to_hw_sg(sg);
1574         }
1575         /* input compound frame */
1576         cf->sg[1].length = length;
1577         cf->sg[1].extension = 1;
1578         cf->sg[1].final = 1;
1579         cpu_to_hw_sg(&cf->sg[1]);
1580
1581         /* output */
1582         sg++;
1583         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1584         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1585         sg->length = sym->cipher.data.length;
1586         length = sg->length;
1587         if (is_encode(ses)) {
1588                 cpu_to_hw_sg(sg);
1589                 /* set auth output */
1590                 sg++;
1591                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1592                 sg->length = ses->digest_length;
1593                 length += sg->length;
1594         }
1595         sg->final = 1;
1596         cpu_to_hw_sg(sg);
1597
1598         /* output compound frame */
1599         cf->sg[0].length = length;
1600         cf->sg[0].extension = 1;
1601         cpu_to_hw_sg(&cf->sg[0]);
1602
1603         return cf;
1604 }
1605
1606 #ifdef RTE_LIB_SECURITY
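/*
 * Protocol offload (IPsec/PDCP) jobs hand the whole packet to SEC; the
 * encap/decap PDB programmed at session setup drives the header
 * processing, so no per-op IV or digest entries are built here.
 */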
1607 static inline struct dpaa_sec_job *
1608 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1609 {
1610         struct rte_crypto_sym_op *sym = op->sym;
1611         struct dpaa_sec_job *cf;
1612         struct dpaa_sec_op_ctx *ctx;
1613         struct qm_sg_entry *sg;
1614         phys_addr_t src_start_addr, dst_start_addr;
1615
1616         ctx = dpaa_sec_alloc_ctx(ses, 2);
1617         if (!ctx)
1618                 return NULL;
1619         cf = &ctx->job;
1620         ctx->op = op;
1621
1622         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1623
1624         if (sym->m_dst)
1625                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1626         else
1627                 dst_start_addr = src_start_addr;
1628
1629         /* input */
1630         sg = &cf->sg[1];
1631         qm_sg_entry_set64(sg, src_start_addr);
1632         sg->length = sym->m_src->pkt_len;
1633         sg->final = 1;
1634         cpu_to_hw_sg(sg);
1635
1636         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1637         /* output */
1638         sg = &cf->sg[0];
1639         qm_sg_entry_set64(sg, dst_start_addr);
1640         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1641         cpu_to_hw_sg(sg);
1642
1643         return cf;
1644 }
1645
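/*
 * Scatter-gather variant of build_proto(). Note that the last output
 * entry exposes the full remaining buffer (buf_len - data_off) rather
 * than data_len, so protocol encapsulation is free to grow the packet.
 */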
1646 static inline struct dpaa_sec_job *
1647 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1648 {
1649         struct rte_crypto_sym_op *sym = op->sym;
1650         struct dpaa_sec_job *cf;
1651         struct dpaa_sec_op_ctx *ctx;
1652         struct qm_sg_entry *sg, *out_sg, *in_sg;
1653         struct rte_mbuf *mbuf;
1654         uint8_t req_segs;
1655         uint32_t in_len = 0, out_len = 0;
1656
1657         if (sym->m_dst)
1658                 mbuf = sym->m_dst;
1659         else
1660                 mbuf = sym->m_src;
1661
1662         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1663         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1664                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1665                                 MAX_SG_ENTRIES);
1666                 return NULL;
1667         }
1668
1669         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1670         if (!ctx)
1671                 return NULL;
1672         cf = &ctx->job;
1673         ctx->op = op;
1674         /* output */
1675         out_sg = &cf->sg[0];
1676         out_sg->extension = 1;
1677         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1678
1679         /* 1st seg */
1680         sg = &cf->sg[2];
1681         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1682         sg->offset = 0;
1683
1684         /* Successive segs */
1685         while (mbuf->next) {
1686                 sg->length = mbuf->data_len;
1687                 out_len += sg->length;
1688                 mbuf = mbuf->next;
1689                 cpu_to_hw_sg(sg);
1690                 sg++;
1691                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1692                 sg->offset = 0;
1693         }
1694         sg->length = mbuf->buf_len - mbuf->data_off;
1695         out_len += sg->length;
1696         sg->final = 1;
1697         cpu_to_hw_sg(sg);
1698
1699         out_sg->length = out_len;
1700         cpu_to_hw_sg(out_sg);
1701
1702         /* input */
1703         mbuf = sym->m_src;
1704         in_sg = &cf->sg[1];
1705         in_sg->extension = 1;
1706         in_sg->final = 1;
1707         in_len = mbuf->data_len;
1708
1709         sg++;
1710         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1711
1712         /* 1st seg */
1713         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1714         sg->length = mbuf->data_len;
1715         sg->offset = 0;
1716
1717         /* Successive segs */
1718         mbuf = mbuf->next;
1719         while (mbuf) {
1720                 cpu_to_hw_sg(sg);
1721                 sg++;
1722                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1723                 sg->length = mbuf->data_len;
1724                 sg->offset = 0;
1725                 in_len += sg->length;
1726                 mbuf = mbuf->next;
1727         }
1728         sg->final = 1;
1729         cpu_to_hw_sg(sg);
1730
1731         in_sg->length = in_len;
1732         cpu_to_hw_sg(in_sg);
1733
1734         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1735
1736         return cf;
1737 }
1738 #endif
1739
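/*
 * Datapath TX: each op is translated into a compound frame job and
 * enqueued to the session's SEC input queue in bursts of at most
 * DPAA_SEC_BURST. On any per-op failure the burst is truncated and the
 * frames already built are still submitted (see the send_pkts label).
 */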
1740 static uint16_t
1741 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1742                        uint16_t nb_ops)
1743 {
1744         /* Transmit the frames to the given device and queue pair */
1745         uint32_t loop;
1746         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1747         uint16_t num_tx = 0;
1748         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1749         uint32_t frames_to_send;
1750         struct rte_crypto_op *op;
1751         struct dpaa_sec_job *cf;
1752         dpaa_sec_session *ses;
1753         uint16_t auth_hdr_len, auth_tail_len;
1754         uint32_t index, flags[DPAA_SEC_BURST] = {0};
1755         struct qman_fq *inq[DPAA_SEC_BURST];
1756
1757         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1758                 if (rte_dpaa_portal_init((void *)0)) {
1759                         DPAA_SEC_ERR("Failure in affining portal");
1760                         return 0;
1761                 }
1762         }
1763
1764         while (nb_ops) {
1765                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1766                                 DPAA_SEC_BURST : nb_ops;
1767                 for (loop = 0; loop < frames_to_send; loop++) {
1768                         op = *(ops++);
1769                         if (*dpaa_seqn(op->sym->m_src) != 0) {
1770                                 index = *dpaa_seqn(op->sym->m_src) - 1;
1771                                 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1772                                         /* QM_EQCR_DCA_IDXMASK = 0x0f */
1773                                         flags[loop] = ((index & 0x0f) << 8);
1774                                         flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1775                                         DPAA_PER_LCORE_DQRR_SIZE--;
1776                                         DPAA_PER_LCORE_DQRR_HELD &=
1777                                                                 ~(1 << index);
1778                                 }
1779                         }
1780
1781                         switch (op->sess_type) {
1782                         case RTE_CRYPTO_OP_WITH_SESSION:
1783                                 ses = (dpaa_sec_session *)
1784                                         get_sym_session_private_data(
1785                                                 op->sym->session,
1786                                                 dpaa_cryptodev_driver_id);
1787                                 break;
1788 #ifdef RTE_LIB_SECURITY
1789                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1790                                 ses = (dpaa_sec_session *)
1791                                         get_sec_session_private_data(
1792                                                         op->sym->sec_session);
1793                                 break;
1794 #endif
1795                         default:
1796                                 DPAA_SEC_DP_ERR(
1797                                         "sessionless crypto op not supported");
1798                                 frames_to_send = loop;
1799                                 nb_ops = loop;
1800                                 goto send_pkts;
1801                         }
1802
1803                         if (!ses) {
1804                                 DPAA_SEC_DP_ERR("session not available");
1805                                 frames_to_send = loop;
1806                                 nb_ops = loop;
1807                                 goto send_pkts;
1808                         }
1809
1810                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1811                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1812                                         frames_to_send = loop;
1813                                         nb_ops = loop;
1814                                         goto send_pkts;
1815                                 }
1816                         } else if (unlikely(ses->qp[rte_lcore_id() %
1817                                                 MAX_DPAA_CORES] != qp)) {
1818                                 DPAA_SEC_DP_ERR("Old: sess->qp = %p,"
1819                                         " new qp = %p\n",
1820                                         ses->qp[rte_lcore_id() %
1821                                         MAX_DPAA_CORES], qp);
1822                                 frames_to_send = loop;
1823                                 nb_ops = loop;
1824                                 goto send_pkts;
1825                         }
1826
1827                         auth_hdr_len = op->sym->auth.data.length -
1828                                                 op->sym->cipher.data.length;
1829                         auth_tail_len = 0;
1830
1831                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1832                                   ((op->sym->m_dst == NULL) ||
1833                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1834                                 switch (ses->ctxt) {
1835 #ifdef RTE_LIB_SECURITY
1836                                 case DPAA_SEC_PDCP:
1837                                 case DPAA_SEC_IPSEC:
1838                                         cf = build_proto(op, ses);
1839                                         break;
1840 #endif
1841                                 case DPAA_SEC_AUTH:
1842                                         cf = build_auth_only(op, ses);
1843                                         break;
1844                                 case DPAA_SEC_CIPHER:
1845                                         cf = build_cipher_only(op, ses);
1846                                         break;
1847                                 case DPAA_SEC_AEAD:
1848                                         cf = build_cipher_auth_gcm(op, ses);
1849                                         auth_hdr_len = ses->auth_only_len;
1850                                         break;
1851                                 case DPAA_SEC_CIPHER_HASH:
1852                                         auth_hdr_len =
1853                                                 op->sym->cipher.data.offset
1854                                                 - op->sym->auth.data.offset;
1855                                         auth_tail_len =
1856                                                 op->sym->auth.data.length
1857                                                 - op->sym->cipher.data.length
1858                                                 - auth_hdr_len;
1859                                         cf = build_cipher_auth(op, ses);
1860                                         break;
1861                                 default:
1862                                         DPAA_SEC_DP_ERR("operation not supported");
1863                                         frames_to_send = loop;
1864                                         nb_ops = loop;
1865                                         goto send_pkts;
1866                                 }
1867                         } else {
1868                                 switch (ses->ctxt) {
1869 #ifdef RTE_LIB_SECURITY
1870                                 case DPAA_SEC_PDCP:
1871                                 case DPAA_SEC_IPSEC:
1872                                         cf = build_proto_sg(op, ses);
1873                                         break;
1874 #endif
1875                                 case DPAA_SEC_AUTH:
1876                                         cf = build_auth_only_sg(op, ses);
1877                                         break;
1878                                 case DPAA_SEC_CIPHER:
1879                                         cf = build_cipher_only_sg(op, ses);
1880                                         break;
1881                                 case DPAA_SEC_AEAD:
1882                                         cf = build_cipher_auth_gcm_sg(op, ses);
1883                                         auth_hdr_len = ses->auth_only_len;
1884                                         break;
1885                                 case DPAA_SEC_CIPHER_HASH:
1886                                         auth_hdr_len =
1887                                                 op->sym->cipher.data.offset
1888                                                 - op->sym->auth.data.offset;
1889                                         auth_tail_len =
1890                                                 op->sym->auth.data.length
1891                                                 - op->sym->cipher.data.length
1892                                                 - auth_hdr_len;
1893                                         cf = build_cipher_auth_sg(op, ses);
1894                                         break;
1895                                 default:
1896                                         DPAA_SEC_DP_ERR("operation not supported");
1897                                         frames_to_send = loop;
1898                                         nb_ops = loop;
1899                                         goto send_pkts;
1900                                 }
1901                         }
1902                         if (unlikely(!cf)) {
1903                                 frames_to_send = loop;
1904                                 nb_ops = loop;
1905                                 goto send_pkts;
1906                         }
1907
1908                         fd = &fds[loop];
1909                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1910                         fd->opaque_addr = 0;
1911                         fd->cmd = 0;
1912                         qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1913                         fd->_format1 = qm_fd_compound;
1914                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1915
1916                         /* auth_only_len is set to 0 in the descriptor and
1917                          * is overwritten here in fd.cmd, which updates the
1918                          * DPOVRD register.
1919                          */
1920                         if (auth_hdr_len || auth_tail_len) {
1921                                 fd->cmd = 0x80000000;
1922                                 fd->cmd |=
1923                                         ((auth_tail_len << 16) | auth_hdr_len);
1924                         }
1925
1926 #ifdef RTE_LIB_SECURITY
1927                         /* For PDCP, the per-packet HFN is stored in the
1928                          * mbuf private area, after the sym_op.
1929                          */
1930                         if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1931                                 fd->cmd = 0x80000000 |
1932                                         *((uint32_t *)((uint8_t *)op +
1933                                         ses->pdcp.hfn_ovd_offset));
1934                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd: %u\n",
1935                                         *((uint32_t *)((uint8_t *)op +
1936                                         ses->pdcp.hfn_ovd_offset)),
1937                                         ses->pdcp.hfn_ovd);
1938                         }
1939 #endif
1940                 }
1941 send_pkts:
1942                 loop = 0;
1943                 while (loop < frames_to_send) {
1944                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1945                                         &flags[loop], frames_to_send - loop);
1946                 }
1947                 nb_ops -= frames_to_send;
1948                 num_tx += frames_to_send;
1949         }
1950
1951         dpaa_qp->tx_pkts += num_tx;
1952         dpaa_qp->tx_errs += nb_ops - num_tx;
1953
1954         return num_tx;
1955 }
1956
1957 static uint16_t
1958 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1959                        uint16_t nb_ops)
1960 {
1961         uint16_t num_rx;
1962         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1963
1964         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1965                 if (rte_dpaa_portal_init((void *)0)) {
1966                         DPAA_SEC_ERR("Failure in affining portal");
1967                         return 0;
1968                 }
1969         }
1970
1971         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1972
1973         dpaa_qp->rx_pkts += num_rx;
1974         dpaa_qp->rx_errs += nb_ops - num_rx;
1975
1976         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1977
1978         return num_rx;
1979 }
1980
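/*
 * A minimal application-side sketch of driving these burst hooks through
 * the public cryptodev API (illustrative only; dev_id, qp_id and the ops
 * array are assumed to be set up by the caller):
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	uint16_t done = 0;
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    &ops[done],
 *						    sent - done);
 */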
1981 /** Release queue pair */
1982 static int
1983 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1984                             uint16_t qp_id)
1985 {
1986         struct dpaa_sec_dev_private *internals;
1987         struct dpaa_sec_qp *qp = NULL;
1988
1989         PMD_INIT_FUNC_TRACE();
1990
1991         DPAA_SEC_DEBUG("dev = %p, queue = %d", dev, qp_id);
1992
1993         internals = dev->data->dev_private;
1994         if (qp_id >= internals->max_nb_queue_pairs) {
1995                 DPAA_SEC_ERR("Invalid qp_id %d, max supported qpid: %d",
1996                              qp_id, internals->max_nb_queue_pairs);
1997                 return -EINVAL;
1998         }
1999
2000         qp = &internals->qps[qp_id];
2001         rte_mempool_free(qp->ctx_pool);
2002         qp->internals = NULL;
2003         dev->data->queue_pairs[qp_id] = NULL;
2004
2005         return 0;
2006 }
2007
2008 /** Setup a queue pair */
2009 static int
2010 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2011                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2012                 __rte_unused int socket_id)
2013 {
2014         struct dpaa_sec_dev_private *internals;
2015         struct dpaa_sec_qp *qp = NULL;
2016         char str[20];
2017
2018         DPAA_SEC_DEBUG("dev = %p, queue = %d, conf = %p", dev, qp_id, qp_conf);
2019
2020         internals = dev->data->dev_private;
2021         if (qp_id >= internals->max_nb_queue_pairs) {
2022                 DPAA_SEC_ERR("Invalid qp_id %d, max supported qpid: %d",
2023                              qp_id, internals->max_nb_queue_pairs);
2024                 return -EINVAL;
2025         }
2026
2027         qp = &internals->qps[qp_id];
2028         qp->internals = internals;
2029         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2030                         dev->data->dev_id, qp_id);
2031         if (!qp->ctx_pool) {
2032                 qp->ctx_pool = rte_mempool_create((const char *)str,
2033                                                         CTX_POOL_NUM_BUFS,
2034                                                         CTX_POOL_BUF_SIZE,
2035                                                         CTX_POOL_CACHE_SIZE, 0,
2036                                                         NULL, NULL, NULL, NULL,
2037                                                         SOCKET_ID_ANY, 0);
2038                 if (!qp->ctx_pool) {
2039                         DPAA_SEC_ERR("%s create failed\n", str);
2040                         return -ENOMEM;
2041                 }
2042         } else
2043                 DPAA_SEC_INFO("mempool already created for dev_id: %d, qp: %d",
2044                                 dev->data->dev_id, qp_id);
2045         dev->data->queue_pairs[qp_id] = qp;
2046
2047         return 0;
2048 }
2049
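/*
 * Usage sketch (names illustrative): queue pairs are configured through
 * the standard API before starting the device; note that this PMD
 * ignores the qp_conf contents apart from creating its own ctx_pool:
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 1024,             // assumed depth
 *		.mp_session = sess_mp,              // assumed mempools
 *		.mp_session_private = sess_priv_mp,
 *	};
 *	ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *					     rte_socket_id());
 */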
2050 /** Returns the size of session structure */
2051 static unsigned int
2052 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2053 {
2054         PMD_INIT_FUNC_TRACE();
2055
2056         return sizeof(dpaa_sec_session);
2057 }
2058
2059 static int
2060 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2061                      struct rte_crypto_sym_xform *xform,
2062                      dpaa_sec_session *session)
2063 {
2064         session->ctxt = DPAA_SEC_CIPHER;
2065         session->cipher_alg = xform->cipher.algo;
2066         session->iv.length = xform->cipher.iv.length;
2067         session->iv.offset = xform->cipher.iv.offset;
2068         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2069                                                RTE_CACHE_LINE_SIZE);
2070         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2071                 DPAA_SEC_ERR("No Memory for cipher key");
2072                 return -ENOMEM;
2073         }
2074         session->cipher_key.length = xform->cipher.key.length;
2075
2076         memcpy(session->cipher_key.data, xform->cipher.key.data,
2077                xform->cipher.key.length);
2078         switch (xform->cipher.algo) {
2079         case RTE_CRYPTO_CIPHER_AES_CBC:
2080                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2081                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2082                 break;
2083         case RTE_CRYPTO_CIPHER_DES_CBC:
2084                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2085                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2086                 break;
2087         case RTE_CRYPTO_CIPHER_3DES_CBC:
2088                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2089                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2090                 break;
2091         case RTE_CRYPTO_CIPHER_AES_CTR:
2092                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2093                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2094                 break;
2095         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2096                 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2097                 break;
2098         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2099                 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2100                 break;
2101         default:
2102                 DPAA_SEC_ERR("Crypto: Unsupported Cipher specified %u",
2103                               xform->cipher.algo);
2104                 return -ENOTSUP;
2105         }
2106         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2107                         DIR_ENC : DIR_DEC;
2108
2109         return 0;
2110 }
2111
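/*
 * Example xform consumed by dpaa_sec_cipher_init() (a sketch; key and IV
 * parameters are placeholders):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = NULL,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */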
2112 static int
2113 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2114                    struct rte_crypto_sym_xform *xform,
2115                    dpaa_sec_session *session)
2116 {
2117         session->ctxt = DPAA_SEC_AUTH;
2118         session->auth_alg = xform->auth.algo;
2119         session->auth_key.length = xform->auth.key.length;
2120         if (xform->auth.key.length) {
2121                 session->auth_key.data =
2122                                 rte_zmalloc(NULL, xform->auth.key.length,
2123                                              RTE_CACHE_LINE_SIZE);
2124                 if (session->auth_key.data == NULL) {
2125                         DPAA_SEC_ERR("No Memory for auth key");
2126                         return -ENOMEM;
2127                 }
2128                 memcpy(session->auth_key.data, xform->auth.key.data,
2129                                 xform->auth.key.length);
2130
2131         }
2132         session->digest_length = xform->auth.digest_length;
2133         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2134                 session->iv.offset = xform->auth.iv.offset;
2135                 session->iv.length = xform->auth.iv.length;
2136         }
2137
2138         switch (xform->auth.algo) {
2139         case RTE_CRYPTO_AUTH_SHA1:
2140                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2141                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2142                 break;
2143         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2144                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2145                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2146                 break;
2147         case RTE_CRYPTO_AUTH_MD5:
2148                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2149                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2150                 break;
2151         case RTE_CRYPTO_AUTH_MD5_HMAC:
2152                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2153                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2154                 break;
2155         case RTE_CRYPTO_AUTH_SHA224:
2156                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2157                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2158                 break;
2159         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2160                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2161                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2162                 break;
2163         case RTE_CRYPTO_AUTH_SHA256:
2164                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2165                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2166                 break;
2167         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2168                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2169                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2170                 break;
2171         case RTE_CRYPTO_AUTH_SHA384:
2172                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2173                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2174                 break;
2175         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2176                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2177                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2178                 break;
2179         case RTE_CRYPTO_AUTH_SHA512:
2180                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2181                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2182                 break;
2183         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2184                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2185                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2186                 break;
2187         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2188                 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2189                 session->auth_key.algmode = OP_ALG_AAI_F9;
2190                 break;
2191         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2192                 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2193                 session->auth_key.algmode = OP_ALG_AAI_F9;
2194                 break;
2195         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2196                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2197                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2198                 break;
2199         case RTE_CRYPTO_AUTH_AES_CMAC:
2200                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2201                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2202                 break;
2203         default:
2204                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2205                               xform->auth.algo);
2206                 return -ENOTSUP;
2207         }
2208
2209         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2210                         DIR_ENC : DIR_DEC;
2211
2212         return 0;
2213 }
2214
2215 static int
2216 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2217                    struct rte_crypto_sym_xform *xform,
2218                    dpaa_sec_session *session)
2219 {
2221         struct rte_crypto_cipher_xform *cipher_xform;
2222         struct rte_crypto_auth_xform *auth_xform;
2223
2224         session->ctxt = DPAA_SEC_CIPHER_HASH;
2225         if (session->auth_cipher_text) {
2226                 cipher_xform = &xform->cipher;
2227                 auth_xform = &xform->next->auth;
2228         } else {
2229                 cipher_xform = &xform->next->cipher;
2230                 auth_xform = &xform->auth;
2231         }
2232
2233         /* Set IV parameters */
2234         session->iv.offset = cipher_xform->iv.offset;
2235         session->iv.length = cipher_xform->iv.length;
2236
2237         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2238                                                RTE_CACHE_LINE_SIZE);
2239         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2240                 DPAA_SEC_ERR("No Memory for cipher key");
2241                 return -ENOMEM;
2242         }
2243         session->cipher_key.length = cipher_xform->key.length;
2244         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2245                                              RTE_CACHE_LINE_SIZE);
2246         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2247                 DPAA_SEC_ERR("No Memory for auth key");
2248                 return -ENOMEM;
2249         }
2250         session->auth_key.length = auth_xform->key.length;
2251         memcpy(session->cipher_key.data, cipher_xform->key.data,
2252                cipher_xform->key.length);
2253         memcpy(session->auth_key.data, auth_xform->key.data,
2254                auth_xform->key.length);
2255
2256         session->digest_length = auth_xform->digest_length;
2257         session->auth_alg = auth_xform->algo;
2258
2259         switch (auth_xform->algo) {
2260         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2261                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2262                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2263                 break;
2264         case RTE_CRYPTO_AUTH_MD5_HMAC:
2265                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2266                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2267                 break;
2268         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2269                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2270                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2271                 break;
2272         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2273                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2274                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2275                 break;
2276         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2277                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2278                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2279                 break;
2280         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2281                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2282                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2283                 break;
2284         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2285                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2286                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2287                 break;
2288         case RTE_CRYPTO_AUTH_AES_CMAC:
2289                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2290                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2291                 break;
2292         default:
2293                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2294                               auth_xform->algo);
2295                 return -ENOTSUP;
2296         }
2297
2298         session->cipher_alg = cipher_xform->algo;
2299
2300         switch (cipher_xform->algo) {
2301         case RTE_CRYPTO_CIPHER_AES_CBC:
2302                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2303                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2304                 break;
2305         case RTE_CRYPTO_CIPHER_DES_CBC:
2306                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2307                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2308                 break;
2309         case RTE_CRYPTO_CIPHER_3DES_CBC:
2310                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2311                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2312                 break;
2313         case RTE_CRYPTO_CIPHER_AES_CTR:
2314                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2315                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2316                 break;
2317         default:
2318                 DPAA_SEC_ERR("Crypto: Unsupported Cipher specified %u",
2319                               cipher_xform->algo);
2320                 return -ENOTSUP;
2321         }
2322         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2323                                 DIR_ENC : DIR_DEC;
2324         return 0;
2325 }
2326
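/*
 * For the chained case the two xforms are linked through xform->next and
 * their order selects auth_cipher_text above. Sketch (remaining fields
 * zero-initialized for brevity):
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,	// encrypt-then-authenticate
 *	};
 */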
2327 static int
2328 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2329                    struct rte_crypto_sym_xform *xform,
2330                    dpaa_sec_session *session)
2331 {
2332         session->aead_alg = xform->aead.algo;
2333         session->ctxt = DPAA_SEC_AEAD;
2334         session->iv.length = xform->aead.iv.length;
2335         session->iv.offset = xform->aead.iv.offset;
2336         session->auth_only_len = xform->aead.aad_length;
2337         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2338                                              RTE_CACHE_LINE_SIZE);
2339         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2340                 DPAA_SEC_ERR("No Memory for aead key");
2341                 return -ENOMEM;
2342         }
2343         session->aead_key.length = xform->aead.key.length;
2344         session->digest_length = xform->aead.digest_length;
2345
2346         memcpy(session->aead_key.data, xform->aead.key.data,
2347                xform->aead.key.length);
2348
2349         switch (session->aead_alg) {
2350         case RTE_CRYPTO_AEAD_AES_GCM:
2351                 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2352                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2353                 break;
2354         default:
2355                 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2356                 return -ENOTSUP;
2357         }
2358
2359         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2360                         DIR_ENC : DIR_DEC;
2361
2362         return 0;
2363 }
2364
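/*
 * Example AEAD xform accepted above (AES-GCM; lengths are placeholders):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.aad_length = 16,
 *			.digest_length = 16,
 *		},
 *	};
 */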
2365 static struct qman_fq *
2366 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2367 {
2368         unsigned int i;
2369
2370         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2371                 if (qi->inq_attach[i] == 0) {
2372                         qi->inq_attach[i] = 1;
2373                         return &qi->inq[i];
2374                 }
2375         }
2376         DPAA_SEC_WARN("All sessions in use, max_nb_sessions: %u",
2377                       qi->max_nb_sessions);
2377
2378         return NULL;
2379 }
2380
2381 static int
2382 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2383 {
2384         unsigned int i;
2385
2386         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2387                 if (&qi->inq[i] == fq) {
2388                         if (qman_retire_fq(fq, NULL) != 0)
2389                                 DPAA_SEC_DEBUG("Queue could not be retired\n");
2390                         qman_oos_fq(fq);
2391                         qi->inq_attach[i] = 0;
2392                         return 0;
2393                 }
2394         }
2395         return -1;
2396 }
2397
2398 int
2399 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2400 {
2401         int ret;
2402
2403         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2404         ret = dpaa_sec_prep_cdb(sess);
2405         if (ret) {
2406                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2407                 return ret;
2408         }
2409         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2410                 ret = rte_dpaa_portal_init((void *)0);
2411                 if (ret) {
2412                         DPAA_SEC_ERR("Failure in affining portal");
2413                         return ret;
2414                 }
2415         }
2416         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2417                                rte_dpaa_mem_vtop(&sess->cdb),
2418                                qman_fq_fqid(&qp->outq));
2419         if (ret)
2420                 DPAA_SEC_ERR("Unable to init sec queue");
2421
2422         return ret;
2423 }
2424
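/*
 * Note: dpaa_sec_attach_sess_q() is also called lazily from the enqueue
 * path the first time a session is used on an lcore whose qp slot is
 * still unset, so the CDB and RX queue are ready before the first frame.
 */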
2425 static inline void
2426 free_session_data(dpaa_sec_session *s)
2427 {
2428         if (is_aead(s))
2429                 rte_free(s->aead_key.data);
2430         else {
2431                 rte_free(s->auth_key.data);
2432                 rte_free(s->cipher_key.data);
2433         }
2434         memset(s, 0, sizeof(dpaa_sec_session));
2435 }
2436
2437 static int
2438 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2439                             struct rte_crypto_sym_xform *xform, void *sess)
2440 {
2441         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2442         dpaa_sec_session *session = sess;
2443         uint32_t i;
2444         int ret;
2445
2446         PMD_INIT_FUNC_TRACE();
2447
2448         if (unlikely(sess == NULL)) {
2449                 DPAA_SEC_ERR("invalid session struct");
2450                 return -EINVAL;
2451         }
2452         memset(session, 0, sizeof(dpaa_sec_session));
2453
2454         /* Default IV length = 0 */
2455         session->iv.length = 0;
2456
2457         /* Cipher Only */
2458         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2459                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2460                 ret = dpaa_sec_cipher_init(dev, xform, session);
2461
2462         /* Authentication Only */
2463         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2464                    xform->next == NULL) {
2465                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2466                 session->ctxt = DPAA_SEC_AUTH;
2467                 ret = dpaa_sec_auth_init(dev, xform, session);
2468
2469         /* Cipher then Authenticate */
2470         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2471                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2472                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2473                         session->auth_cipher_text = 1;
2474                         if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2475                                 ret = dpaa_sec_auth_init(dev, xform, session);
2476                         else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2477                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2478                         else
2479                                 ret = dpaa_sec_chain_init(dev, xform, session);
2480                 } else {
2481                         DPAA_SEC_ERR("Not supported: Cipher (decrypt) then Auth");
2482                         return -ENOTSUP;
2483                 }
2484         /* Authenticate then Cipher */
2485         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2486                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2487                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2488                         session->auth_cipher_text = 0;
2489                         if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2490                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2491                         else if (xform->next->cipher.algo
2492                                         == RTE_CRYPTO_CIPHER_NULL)
2493                                 ret = dpaa_sec_auth_init(dev, xform, session);
2494                         else
2495                                 ret = dpaa_sec_chain_init(dev, xform, session);
2496                 } else {
2497                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2498                         return -ENOTSUP;
2499                 }
2500
2501         /* AEAD operation for AES-GCM kind of Algorithms */
2502         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2503                    xform->next == NULL) {
2504                 ret = dpaa_sec_aead_init(dev, xform, session);
2505
2506         } else {
2507                 DPAA_SEC_ERR("Invalid crypto type");
2508                 return -EINVAL;
2509         }
2510         if (ret) {
2511                 DPAA_SEC_ERR("unable to init session");
2512                 goto err1;
2513         }
2514
2515         rte_spinlock_lock(&internals->lock);
2516         for (i = 0; i < MAX_DPAA_CORES; i++) {
2517                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2518                 if (session->inq[i] == NULL) {
2519                         DPAA_SEC_ERR("unable to attach sec queue");
2520                         rte_spinlock_unlock(&internals->lock);
2521                         ret = -EBUSY;
2522                         goto err1;
2523                 }
2524         }
2525         rte_spinlock_unlock(&internals->lock);
2526
2527         return 0;
2528
2529 err1:
2530         free_session_data(session);
2531         return ret;
2532 }
2533
2534 static int
2535 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2536                 struct rte_crypto_sym_xform *xform,
2537                 struct rte_cryptodev_sym_session *sess,
2538                 struct rte_mempool *mempool)
2539 {
2540         void *sess_private_data;
2541         int ret;
2542
2543         PMD_INIT_FUNC_TRACE();
2544
2545         if (rte_mempool_get(mempool, &sess_private_data)) {
2546                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2547                 return -ENOMEM;
2548         }
2549
2550         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2551         if (ret != 0) {
2552                 DPAA_SEC_ERR("failed to configure session parameters");
2553
2554                 /* Return session to mempool */
2555                 rte_mempool_put(mempool, sess_private_data);
2556                 return ret;
2557         }
2558
2559         set_sym_session_private_data(sess, dev->driver_id,
2560                         sess_private_data);
2561
2562
2563         return 0;
2564 }
2565
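/*
 * Application-side session setup sketch for this entry point (API names
 * follow the cryptodev framework of this release; the mempools are
 * assumed to be created by the caller):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *					   sess_priv_mp) != 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */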
2566 static inline void
2567 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2568 {
2569         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2570         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2571         uint8_t i;
2572
2573         for (i = 0; i < MAX_DPAA_CORES; i++) {
2574                 if (s->inq[i])
2575                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2576                 s->inq[i] = NULL;
2577                 s->qp[i] = NULL;
2578         }
2579         free_session_data(s);
2580         rte_mempool_put(sess_mp, (void *)s);
2581 }
2582
2583 /** Clear the memory of session so it doesn't leave key material behind */
2584 static void
2585 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2586                 struct rte_cryptodev_sym_session *sess)
2587 {
2588         PMD_INIT_FUNC_TRACE();
2589         uint8_t index = dev->driver_id;
2590         void *sess_priv = get_sym_session_private_data(sess, index);
2591         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2592
2593         if (sess_priv) {
2594                 free_session_memory(dev, s);
2595                 set_sym_session_private_data(sess, index, NULL);
2596         }
2597 }
2598
2599 #ifdef RTE_LIB_SECURITY
2600 static int
2601 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2602                         struct rte_security_ipsec_xform *ipsec_xform,
2603                         dpaa_sec_session *session)
2604 {
2605         PMD_INIT_FUNC_TRACE();
2606
2607         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2608                                                RTE_CACHE_LINE_SIZE);
2609         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2610                 DPAA_SEC_ERR("No Memory for aead key");
2611                 return -ENOMEM;
2612         }
2613         memcpy(session->aead_key.data, aead_xform->key.data,
2614                aead_xform->key.length);
2615
2616         session->digest_length = aead_xform->digest_length;
2617         session->aead_key.length = aead_xform->key.length;
2618
2619         switch (aead_xform->algo) {
2620         case RTE_CRYPTO_AEAD_AES_GCM:
2621                 switch (session->digest_length) {
2622                 case 8:
2623                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2624                         break;
2625                 case 12:
2626                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2627                         break;
2628                 case 16:
2629                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2630                         break;
2631                 default:
2632                         DPAA_SEC_ERR("Crypto: Unsupported GCM digest length %d",
2633                                      session->digest_length);
2634                         return -EINVAL;
2635                 }
2636                 if (session->dir == DIR_ENC) {
2637                         memcpy(session->encap_pdb.gcm.salt,
2638                                 (uint8_t *)&(ipsec_xform->salt), 4);
2639                 } else {
2640                         memcpy(session->decap_pdb.gcm.salt,
2641                                 (uint8_t *)&(ipsec_xform->salt), 4);
2642                 }
2643                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2644                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2645                 break;
2646         default:
2647                 DPAA_SEC_ERR("Crypto: Unsupported AEAD specified %u",
2648                               aead_xform->algo);
2649                 return -ENOTSUP;
2650         }
2651         return 0;
2652 }
2653
2654 static int
2655 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2656         struct rte_crypto_auth_xform *auth_xform,
2657         struct rte_security_ipsec_xform *ipsec_xform,
2658         dpaa_sec_session *session)
2659 {
2660         if (cipher_xform) {
2661                 session->cipher_key.data = rte_zmalloc(NULL,
2662                                                        cipher_xform->key.length,
2663                                                        RTE_CACHE_LINE_SIZE);
2664                 if (session->cipher_key.data == NULL &&
2665                                 cipher_xform->key.length > 0) {
2666                         DPAA_SEC_ERR("No Memory for cipher key");
2667                         return -ENOMEM;
2668                 }
2669
2670                 session->cipher_key.length = cipher_xform->key.length;
2671                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2672                                 cipher_xform->key.length);
2673                 session->cipher_alg = cipher_xform->algo;
2674         } else {
2675                 session->cipher_key.data = NULL;
2676                 session->cipher_key.length = 0;
2677                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2678         }
2679
2680         if (auth_xform) {
2681                 session->auth_key.data = rte_zmalloc(NULL,
2682                                                 auth_xform->key.length,
2683                                                 RTE_CACHE_LINE_SIZE);
2684                 if (session->auth_key.data == NULL &&
2685                                 auth_xform->key.length > 0) {
2686                         DPAA_SEC_ERR("No Memory for auth key");
2687                         return -ENOMEM;
2688                 }
2689                 session->auth_key.length = auth_xform->key.length;
2690                 memcpy(session->auth_key.data, auth_xform->key.data,
2691                                 auth_xform->key.length);
2692                 session->auth_alg = auth_xform->algo;
2693                 session->digest_length = auth_xform->digest_length;
2694         } else {
2695                 session->auth_key.data = NULL;
2696                 session->auth_key.length = 0;
2697                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2698         }
2699
2700         switch (session->auth_alg) {
2701         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2702                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2703                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2704                 break;
2705         case RTE_CRYPTO_AUTH_MD5_HMAC:
2706                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2707                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2708                 break;
2709         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2710                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2711                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2712                 if (session->digest_length != 16)
2713                         DPAA_SEC_WARN(
2714                         "Using a truncated sha256-hmac digest length is "
2715                         "non-standard; it will not work with lookaside proto");
2716                 break;
2717         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2718                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2719                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2720                 break;
2721         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2722                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2723                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2724                 break;
2725         case RTE_CRYPTO_AUTH_AES_CMAC:
2726                 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2727                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2728                 break;
2729         case RTE_CRYPTO_AUTH_NULL:
2730                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2731                 break;
2732         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2733                 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2734                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2735                 break;
2736         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2737         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2738         case RTE_CRYPTO_AUTH_SHA1:
2739         case RTE_CRYPTO_AUTH_SHA256:
2740         case RTE_CRYPTO_AUTH_SHA512:
2741         case RTE_CRYPTO_AUTH_SHA224:
2742         case RTE_CRYPTO_AUTH_SHA384:
2743         case RTE_CRYPTO_AUTH_MD5:
2744         case RTE_CRYPTO_AUTH_AES_GMAC:
2745         case RTE_CRYPTO_AUTH_KASUMI_F9:
2746         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2747         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2748                 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2749                               session->auth_alg);
2750                 return -ENOTSUP;
2751         default:
2752                 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2753                               session->auth_alg);
2754                 return -ENOTSUP;
2755         }
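        /*
         * Note on the mapping above: the OP_PCL_IPSEC_* selectors
         * already encode the RFC-mandated ICV truncation, e.g.
         * HMAC-SHA1-96, AES-XCBC-MAC-96 and AES-CMAC-96 yield 12-byte
         * ICVs, HMAC-SHA2-256-128 a 16-byte ICV, -384-192 a 24-byte
         * ICV and -512-256 a 32-byte ICV, hence the warning above when
         * a non-default SHA256 digest length is requested.
         */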
2756
2757         switch (session->cipher_alg) {
2758         case RTE_CRYPTO_CIPHER_AES_CBC:
2759                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2760                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2761                 break;
2762         case RTE_CRYPTO_CIPHER_DES_CBC:
2763                 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2764                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2765                 break;
2766         case RTE_CRYPTO_CIPHER_3DES_CBC:
2767                 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2768                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2769                 break;
2770         case RTE_CRYPTO_CIPHER_AES_CTR:
2771                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2772                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2773                 if (session->dir == DIR_ENC) {
2774                         session->encap_pdb.ctr.ctr_initial = 0x00000001;
2775                         session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2776                 } else {
2777                         session->decap_pdb.ctr.ctr_initial = 0x00000001;
2778                         session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2779                 }
2780                 break;
2781         case RTE_CRYPTO_CIPHER_NULL:
2782                 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2783                 break;
2784         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2785         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2786         case RTE_CRYPTO_CIPHER_3DES_ECB:
2787         case RTE_CRYPTO_CIPHER_AES_ECB:
2788         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2789                 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2790                               session->cipher_alg);
2791                 return -ENOTSUP;
2792         default:
2793                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2794                               session->cipher_alg);
2795                 return -ENOTSUP;
2796         }
2797
2798         return 0;
2799 }
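
/*
 * Illustrative only: a minimal cipher+auth xform chain that would be
 * mapped by dpaa_sec_ipsec_proto_init() above. "cipher_key" and
 * "auth_key" are hypothetical application buffers, not driver state.
 *
 *   struct rte_crypto_sym_xform auth = {
 *           .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *           .auth = {
 *                   .op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *                   .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *                   .key = { .data = auth_key, .length = 20 },
 *                   .digest_length = 12,
 *           },
 *   };
 *   struct rte_crypto_sym_xform cipher = {
 *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *           .next = &auth,
 *           .cipher = {
 *                   .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *                   .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *                   .key = { .data = cipher_key, .length = 16 },
 *           },
 *   };
 */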
2800
2801 static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2803                            struct rte_security_session_conf *conf,
2804                            void *sess)
2805 {
2806         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2807         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2808         struct rte_crypto_auth_xform *auth_xform = NULL;
2809         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2810         struct rte_crypto_aead_xform *aead_xform = NULL;
2811         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2812         uint32_t i;
2813         int ret;
2814
2815         PMD_INIT_FUNC_TRACE();
2816
2817         memset(session, 0, sizeof(dpaa_sec_session));
2818         session->proto_alg = conf->protocol;
2819         session->ctxt = DPAA_SEC_IPSEC;
2820
2821         if (ipsec_xform->life.bytes_hard_limit != 0 ||
2822             ipsec_xform->life.bytes_soft_limit != 0 ||
2823             ipsec_xform->life.packets_hard_limit != 0 ||
2824             ipsec_xform->life.packets_soft_limit != 0)
2825                 return -ENOTSUP;
2826
2827         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2828                 session->dir = DIR_ENC;
2829         else
2830                 session->dir = DIR_DEC;
2831
2832         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2833                 cipher_xform = &conf->crypto_xform->cipher;
2834                 if (conf->crypto_xform->next)
2835                         auth_xform = &conf->crypto_xform->next->auth;
2836                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2837                                         ipsec_xform, session);
2838         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2839                 auth_xform = &conf->crypto_xform->auth;
2840                 if (conf->crypto_xform->next)
2841                         cipher_xform = &conf->crypto_xform->next->cipher;
2842                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2843                                         ipsec_xform, session);
2844         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2845                 aead_xform = &conf->crypto_xform->aead;
2846                 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2847                                         ipsec_xform, session);
2848         } else {
2849                 DPAA_SEC_ERR("XFORM not specified");
2850                 ret = -EINVAL;
2851                 goto out;
2852         }
2853         if (ret) {
2854                 DPAA_SEC_ERR("Failed to process xform");
2855                 goto out;
2856         }
2857
2858         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2859                 if (ipsec_xform->tunnel.type ==
2860                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2861                         session->ip4_hdr.ip_v = IPVERSION;
2862                         session->ip4_hdr.ip_hl = 5;
2863                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2864                                                 sizeof(session->ip4_hdr));
2865                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2866                         session->ip4_hdr.ip_id = 0;
2867                         session->ip4_hdr.ip_off = 0;
2868                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2869                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2870                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2871                                         IPPROTO_ESP : IPPROTO_AH;
2872                         session->ip4_hdr.ip_sum = 0;
2873                         session->ip4_hdr.ip_src =
2874                                         ipsec_xform->tunnel.ipv4.src_ip;
2875                         session->ip4_hdr.ip_dst =
2876                                         ipsec_xform->tunnel.ipv4.dst_ip;
2877                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2878                                                 (void *)&session->ip4_hdr,
2879                                                 sizeof(struct ip));
2880                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2881                 } else if (ipsec_xform->tunnel.type ==
2882                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2883                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2884                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2885                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2886                                         RTE_IPV6_HDR_TC_SHIFT) &
2887                                         RTE_IPV6_HDR_TC_MASK) |
2888                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2889                                         RTE_IPV6_HDR_FL_SHIFT) &
2890                                         RTE_IPV6_HDR_FL_MASK));
2891                         /* Payload length will be updated by HW */
2892                         session->ip6_hdr.payload_len = 0;
2893                         session->ip6_hdr.hop_limits =
2894                                         ipsec_xform->tunnel.ipv6.hlimit;
2895                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2896                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2897                                         IPPROTO_ESP : IPPROTO_AH;
2898                         memcpy(&session->ip6_hdr.src_addr,
2899                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2900                         memcpy(&session->ip6_hdr.dst_addr,
2901                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2902                         session->encap_pdb.ip_hdr_len =
2903                                                 sizeof(struct rte_ipv6_hdr);
2904                 }
2905
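                /*
                 * Per the caamflib PDB option names: the outer header
                 * built above is carried inline in the encap PDB
                 * (PDBOPTS_ESP_OIHI_PDB_INL), so the SEC engine
                 * prepends it to every packet, and PDBOPTS_ESP_IVSRC
                 * lets the engine source the IV internally.
                 */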
2906                 session->encap_pdb.options =
2907                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2908                         PDBOPTS_ESP_OIHI_PDB_INL |
2909                         PDBOPTS_ESP_IVSRC |
2910                         PDBHMO_ESP_SNR;
2911                 if (ipsec_xform->options.dec_ttl)
2912                         session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
2913                 if (ipsec_xform->options.esn)
2914                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2915                 session->encap_pdb.spi = ipsec_xform->spi;
2916
2917         } else if (ipsec_xform->direction ==
2918                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2919                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2920                         session->decap_pdb.options = sizeof(struct ip) << 16;
2921                 else
2922                         session->decap_pdb.options =
2923                                         sizeof(struct rte_ipv6_hdr) << 16;
2924                 if (ipsec_xform->options.esn)
2925                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2926                 if (ipsec_xform->replay_win_sz) {
2927                         uint32_t win_sz;
2928                         win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2929
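                        /*
                         * Round the requested window up to a power of
                         * two and map it onto the smallest anti-replay
                         * window the SEC decap PDB supports that still
                         * covers it: 32, 64 or 128 entries. E.g. a
                         * requested size of 48 becomes 64 and selects
                         * PDBOPTS_ESP_ARS64.
                         */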
2930                         switch (win_sz) {
2931                         case 1:
2932                         case 2:
2933                         case 4:
2934                         case 8:
2935                         case 16:
2936                         case 32:
2937                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2938                                 break;
2939                         case 64:
2940                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2941                                 break;
2942                         default:
2943                                 session->decap_pdb.options |=
2944                                                         PDBOPTS_ESP_ARS128;
2945                         }
2946                 }
        } else {
                ret = -EINVAL;
                goto out;
        }
2949         rte_spinlock_lock(&internals->lock);
2950         for (i = 0; i < MAX_DPAA_CORES; i++) {
2951                 session->inq[i] = dpaa_sec_attach_rxq(internals);
                if (session->inq[i] == NULL) {
                        DPAA_SEC_ERR("unable to attach sec queue");
                        rte_spinlock_unlock(&internals->lock);
                        ret = -EBUSY;
                        goto out;
                }
2957         }
2958         rte_spinlock_unlock(&internals->lock);
2959
2960         return 0;
out:
        free_session_data(session);
        return ret;
2964 }
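
/*
 * Usage sketch (application side, hypothetical values): a session
 * configuration that reaches dpaa_sec_set_ipsec_session() above via
 * rte_security_session_create(). "cipher" would be the head of a
 * cipher->auth xform chain as in the earlier example.
 *
 *   struct rte_security_session_conf conf = {
 *           .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *           .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *           .ipsec = {
 *                   .spi = 0x1000,
 *                   .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *                   .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *                   .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *                   .tunnel = {
 *                           .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *                   },
 *           },
 *           .crypto_xform = &cipher,
 *   };
 */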
2965
2966 static int
2967 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2968                           struct rte_security_session_conf *conf,
2969                           void *sess)
2970 {
2971         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2972         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2973         struct rte_crypto_auth_xform *auth_xform = NULL;
2974         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2975         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2976         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2977         uint32_t i;
2978         int ret;
2979
2980         PMD_INIT_FUNC_TRACE();
2981
2982         memset(session, 0, sizeof(dpaa_sec_session));
2983
        /* find xform types */
2985         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2986                 cipher_xform = &xform->cipher;
2987                 if (xform->next != NULL)
2988                         auth_xform = &xform->next->auth;
2989         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2990                 auth_xform = &xform->auth;
2991                 if (xform->next != NULL)
2992                         cipher_xform = &xform->next->cipher;
2993         } else {
2994                 DPAA_SEC_ERR("Invalid crypto type");
2995                 return -EINVAL;
2996         }
2997
2998         session->proto_alg = conf->protocol;
2999         session->ctxt = DPAA_SEC_PDCP;
3000
3001         if (cipher_xform) {
3002                 switch (cipher_xform->algo) {
3003                 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3004                         session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3005                         break;
3006                 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3007                         session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3008                         break;
3009                 case RTE_CRYPTO_CIPHER_AES_CTR:
3010                         session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3011                         break;
3012                 case RTE_CRYPTO_CIPHER_NULL:
3013                         session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3014                         break;
3015                 default:
                        DPAA_SEC_ERR("Crypto: Unsupported cipher alg %u",
                                      cipher_xform->algo);
3018                         return -EINVAL;
3019                 }
3020
3021                 session->cipher_key.data = rte_zmalloc(NULL,
3022                                                cipher_xform->key.length,
3023                                                RTE_CACHE_LINE_SIZE);
3024                 if (session->cipher_key.data == NULL &&
3025                                 cipher_xform->key.length > 0) {
3026                         DPAA_SEC_ERR("No Memory for cipher key");
3027                         return -ENOMEM;
3028                 }
3029                 session->cipher_key.length = cipher_xform->key.length;
3030                 memcpy(session->cipher_key.data, cipher_xform->key.data,
3031                         cipher_xform->key.length);
3032                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3033                                         DIR_ENC : DIR_DEC;
3034                 session->cipher_alg = cipher_xform->algo;
3035         } else {
3036                 session->cipher_key.data = NULL;
3037                 session->cipher_key.length = 0;
3038                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3039                 session->dir = DIR_ENC;
3040         }
3041
3042         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3043                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3044                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3045                         DPAA_SEC_ERR(
3046                                 "PDCP Seq Num size should be 5/12 bits for cmode");
3047                         ret = -EINVAL;
3048                         goto out;
3049                 }
3050         }
3051
3052         if (auth_xform) {
3053                 switch (auth_xform->algo) {
3054                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3055                         session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3056                         break;
3057                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3058                         session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3059                         break;
3060                 case RTE_CRYPTO_AUTH_AES_CMAC:
3061                         session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3062                         break;
3063                 case RTE_CRYPTO_AUTH_NULL:
3064                         session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3065                         break;
3066                 default:
                        DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                                      auth_xform->algo);
3069                         rte_free(session->cipher_key.data);
3070                         return -EINVAL;
3071                 }
3072                 session->auth_key.data = rte_zmalloc(NULL,
3073                                                      auth_xform->key.length,
3074                                                      RTE_CACHE_LINE_SIZE);
3075                 if (!session->auth_key.data &&
3076                     auth_xform->key.length > 0) {
3077                         DPAA_SEC_ERR("No Memory for auth key");
3078                         rte_free(session->cipher_key.data);
3079                         return -ENOMEM;
3080                 }
3081                 session->auth_key.length = auth_xform->key.length;
3082                 memcpy(session->auth_key.data, auth_xform->key.data,
3083                        auth_xform->key.length);
3084                 session->auth_alg = auth_xform->algo;
3085         } else {
3086                 session->auth_key.data = NULL;
3087                 session->auth_key.length = 0;
3088                 session->auth_alg = 0;
3089         }
3090         session->pdcp.domain = pdcp_xform->domain;
3091         session->pdcp.bearer = pdcp_xform->bearer;
3092         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3093         session->pdcp.sn_size = pdcp_xform->sn_size;
3094         session->pdcp.hfn = pdcp_xform->hfn;
3095         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3096         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3097         session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
3098         if (cipher_xform)
3099                 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3100
3101         rte_spinlock_lock(&dev_priv->lock);
3102         for (i = 0; i < MAX_DPAA_CORES; i++) {
3103                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3104                 if (session->inq[i] == NULL) {
3105                         DPAA_SEC_ERR("unable to attach sec queue");
3106                         rte_spinlock_unlock(&dev_priv->lock);
3107                         ret = -EBUSY;
3108                         goto out;
3109                 }
3110         }
3111         rte_spinlock_unlock(&dev_priv->lock);
3112         return 0;
3113 out:
3114         rte_free(session->auth_key.data);
3115         rte_free(session->cipher_key.data);
3116         memset(session, 0, sizeof(dpaa_sec_session));
3117         return ret;
3118 }
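
/*
 * Usage sketch (hypothetical values): a PDCP data-plane session
 * configuration that reaches dpaa_sec_set_pdcp_session() above via
 * rte_security_session_create(); "cipher_xform" here would be a
 * SNOW3G/ZUC/AES-CTR cipher transform.
 *
 *   struct rte_security_session_conf conf = {
 *           .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *           .protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *           .pdcp = {
 *                   .bearer = 0x3,
 *                   .domain = RTE_SECURITY_PDCP_MODE_DATA,
 *                   .pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *                   .sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *                   .hfn = 0,
 *                   .hfn_threshold = 0xfffff,
 *           },
 *           .crypto_xform = &cipher_xform,
 *   };
 */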
3119
3120 static int
3121 dpaa_sec_security_session_create(void *dev,
3122                                  struct rte_security_session_conf *conf,
3123                                  struct rte_security_session *sess,
3124                                  struct rte_mempool *mempool)
3125 {
3126         void *sess_private_data;
3127         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3128         int ret;
3129
3130         if (rte_mempool_get(mempool, &sess_private_data)) {
3131                 DPAA_SEC_ERR("Couldn't get object from session mempool");
3132                 return -ENOMEM;
3133         }
3134
3135         switch (conf->protocol) {
3136         case RTE_SECURITY_PROTOCOL_IPSEC:
3137                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3138                                 sess_private_data);
3139                 break;
3140         case RTE_SECURITY_PROTOCOL_PDCP:
3141                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3142                                 sess_private_data);
3143                 break;
        case RTE_SECURITY_PROTOCOL_MACSEC:
                ret = -ENOTSUP;
                break;
        default:
                ret = -EINVAL;
                break;
        }
3149         if (ret != 0) {
3150                 DPAA_SEC_ERR("failed to configure session parameters");
3151                 /* Return session to mempool */
3152                 rte_mempool_put(mempool, sess_private_data);
3153                 return ret;
3154         }
3155
3156         set_sec_session_private_data(sess, sess_private_data);
3157
3158         return ret;
3159 }
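
/*
 * Note: the session private-data object used above comes from the
 * caller-supplied mempool; it is kept only when protocol setup
 * succeeds and is returned to the pool on every failure path.
 */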
3160
/** Clear the session memory so it does not leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
                struct rte_security_session *sess)
{
        void *sess_priv = get_sec_session_private_data(sess);
        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

        PMD_INIT_FUNC_TRACE();

3170         if (sess_priv) {
3171                 free_session_memory((struct rte_cryptodev *)dev, s);
3172                 set_sec_session_private_data(sess, NULL);
3173         }
3174         return 0;
3175 }
3176 #endif
3177 static int
3178 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3179                        struct rte_cryptodev_config *config __rte_unused)
3180 {
3181         PMD_INIT_FUNC_TRACE();
3182
3183         return 0;
3184 }
3185
3186 static int
3187 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3188 {
3189         PMD_INIT_FUNC_TRACE();
3190         return 0;
3191 }
3192
3193 static void
3194 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3195 {
3196         PMD_INIT_FUNC_TRACE();
3197 }
3198
3199 static int
3200 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3201 {
3202         PMD_INIT_FUNC_TRACE();
3203
        if (dev == NULL)
                return -ENODEV;
3206
3207         return 0;
3208 }
3209
3210 static void
3211 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3212                        struct rte_cryptodev_info *info)
3213 {
3214         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3215
3216         PMD_INIT_FUNC_TRACE();
3217         if (info != NULL) {
3218                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3219                 info->feature_flags = dev->feature_flags;
3220                 info->capabilities = dpaa_sec_capabilities;
3221                 info->sym.max_nb_sessions = internals->max_nb_sessions;
3222                 info->driver_id = dpaa_cryptodev_driver_id;
3223         }
3224 }
3225
3226 static enum qman_cb_dqrr_result
3227 dpaa_sec_process_parallel_event(void *event,
3228                         struct qman_portal *qm __always_unused,
3229                         struct qman_fq *outq,
3230                         const struct qm_dqrr_entry *dqrr,
3231                         void **bufs)
3232 {
3233         const struct qm_fd *fd;
3234         struct dpaa_sec_job *job;
3235         struct dpaa_sec_op_ctx *ctx;
3236         struct rte_event *ev = (struct rte_event *)event;
3237
3238         fd = &dqrr->fd;
3239
        /* The SG table is embedded in an op ctx:
         * sg[0] is the output entry,
         * sg[1] is the input entry.
         */
3244         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3245
3246         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3247         ctx->fd_status = fd->status;
3248         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3249                 struct qm_sg_entry *sg_out;
3250                 uint32_t len;
3251
3252                 sg_out = &job->sg[0];
3253                 hw_sg_to_cpu(sg_out);
3254                 len = sg_out->length;
3255                 ctx->op->sym->m_src->pkt_len = len;
3256                 ctx->op->sym->m_src->data_len = len;
3257         }
3258         if (!ctx->fd_status) {
3259                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3260         } else {
3261                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3262                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3263         }
3264         ev->event_ptr = (void *)ctx->op;
3265
3266         ev->flow_id = outq->ev.flow_id;
3267         ev->sub_event_type = outq->ev.sub_event_type;
3268         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3269         ev->op = RTE_EVENT_OP_NEW;
3270         ev->sched_type = outq->ev.sched_type;
3271         ev->queue_id = outq->ev.queue_id;
3272         ev->priority = outq->ev.priority;
3273         *bufs = (void *)ctx->op;
3274
3275         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3276
3277         return qman_cb_dqrr_consume;
3278 }
3279
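/*
 * The parallel handler above returns qman_cb_dqrr_consume, letting
 * QMan release the DQRR entry immediately. The atomic handler below
 * instead holds the entry (qman_cb_dqrr_defer) until the application
 * finishes processing the event, which is what gives atomic queues
 * their per-flow ordering guarantee.
 */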
3280 static enum qman_cb_dqrr_result
3281 dpaa_sec_process_atomic_event(void *event,
3282                         struct qman_portal *qm __rte_unused,
3283                         struct qman_fq *outq,
3284                         const struct qm_dqrr_entry *dqrr,
3285                         void **bufs)
3286 {
3287         u8 index;
3288         const struct qm_fd *fd;
3289         struct dpaa_sec_job *job;
3290         struct dpaa_sec_op_ctx *ctx;
3291         struct rte_event *ev = (struct rte_event *)event;
3292
3293         fd = &dqrr->fd;
3294
        /* The SG table is embedded in an op ctx:
         * sg[0] is the output entry,
         * sg[1] is the input entry.
         */
3299         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3300
3301         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3302         ctx->fd_status = fd->status;
3303         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3304                 struct qm_sg_entry *sg_out;
3305                 uint32_t len;
3306
3307                 sg_out = &job->sg[0];
3308                 hw_sg_to_cpu(sg_out);
3309                 len = sg_out->length;
3310                 ctx->op->sym->m_src->pkt_len = len;
3311                 ctx->op->sym->m_src->data_len = len;
3312         }
3313         if (!ctx->fd_status) {
3314                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3315         } else {
3316                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3317                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3318         }
3319         ev->event_ptr = (void *)ctx->op;
3320         ev->flow_id = outq->ev.flow_id;
3321         ev->sub_event_type = outq->ev.sub_event_type;
3322         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3323         ev->op = RTE_EVENT_OP_NEW;
3324         ev->sched_type = outq->ev.sched_type;
3325         ev->queue_id = outq->ev.queue_id;
3326         ev->priority = outq->ev.priority;
3327
        /* Save the held DQRR entry. Each entry is 64 bytes, so bits
         * [9:6] of its address give its index in the 16-entry ring.
         */
        index = ((uintptr_t)dqrr >> 6) & (16 /* QM_DQRR_SIZE */ - 1);
3330         DPAA_PER_LCORE_DQRR_SIZE++;
3331         DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3332         DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3333         ev->impl_opaque = index + 1;
3334         *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3335         *bufs = (void *)ctx->op;
3336
3337         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3338
3339         return qman_cb_dqrr_defer;
3340 }
3341
3342 int
3343 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3344                 int qp_id,
3345                 uint16_t ch_id,
3346                 const struct rte_event *event)
3347 {
3348         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3349         struct qm_mcc_initfq opts = {0};
        int ret;
3352
3353         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3354                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3355         opts.fqd.dest.channel = ch_id;
3356
3357         switch (event->sched_type) {
3358         case RTE_SCHED_TYPE_ATOMIC:
3359                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
                /* Clear the FQCTRL_AVOIDBLOCK bit, which is redundant
                 * once HOLD_ACTIVE is set.
                 */
3363                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3364                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3365                 break;
3366         case RTE_SCHED_TYPE_ORDERED:
                DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3368                 return -ENOTSUP;
3369         default:
3370                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3371                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3372                 break;
3373         }
3374
3375         ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3376         if (unlikely(ret)) {
3377                 DPAA_SEC_ERR("unable to init caam source fq!");
3378                 return ret;
3379         }
3380
3381         memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3382
3383         return 0;
3384 }
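
/*
 * Usage sketch (hypothetical names): the DPAA event device calls this
 * helper when binding a crypto queue pair to an event queue, roughly:
 *
 *   struct rte_event ev = {
 *           .queue_id = ev_qid,
 *           .sched_type = RTE_SCHED_TYPE_ATOMIC,
 *           .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *   };
 *   ret = dpaa_sec_eventq_attach(cryptodev, qp_id, ch_id, &ev);
 *
 * Completions for that queue pair are then delivered as
 * RTE_EVENT_TYPE_CRYPTODEV events instead of being polled.
 */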
3385
3386 int
3387 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3388                         int qp_id)
3389 {
3390         struct qm_mcc_initfq opts = {0};
3391         int ret;
3392         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3393
3394         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3395                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3396         qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3397         qp->outq.cb.ern  = ern_sec_fq_handler;
3398         qman_retire_fq(&qp->outq, NULL);
3399         qman_oos_fq(&qp->outq);
3400         ret = qman_init_fq(&qp->outq, 0, &opts);
3401         if (ret)
                DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
3403         qp->outq.cb.dqrr = NULL;
3404
3405         return ret;
3406 }
3407
3408 static struct rte_cryptodev_ops crypto_ops = {
3409         .dev_configure        = dpaa_sec_dev_configure,
3410         .dev_start            = dpaa_sec_dev_start,
3411         .dev_stop             = dpaa_sec_dev_stop,
3412         .dev_close            = dpaa_sec_dev_close,
3413         .dev_infos_get        = dpaa_sec_dev_infos_get,
3414         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
3415         .queue_pair_release   = dpaa_sec_queue_pair_release,
3416         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
3417         .sym_session_configure    = dpaa_sec_sym_session_configure,
3418         .sym_session_clear        = dpaa_sec_sym_session_clear,
3419         /* Raw data-path API related operations */
3420         .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3421         .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3422 };
3423
3424 #ifdef RTE_LIB_SECURITY
3425 static const struct rte_security_capability *
3426 dpaa_sec_capabilities_get(void *device __rte_unused)
3427 {
3428         return dpaa_sec_security_cap;
3429 }
3430
3431 static const struct rte_security_ops dpaa_sec_security_ops = {
3432         .session_create = dpaa_sec_security_session_create,
3433         .session_update = NULL,
3434         .session_stats_get = NULL,
3435         .session_destroy = dpaa_sec_security_session_destroy,
3436         .set_pkt_metadata = NULL,
3437         .capabilities_get = dpaa_sec_capabilities_get
3438 };
3439 #endif
3440 static int
3441 dpaa_sec_uninit(struct rte_cryptodev *dev)
3442 {
3443         struct dpaa_sec_dev_private *internals;
3444
3445         if (dev == NULL)
3446                 return -ENODEV;
3447
3448         internals = dev->data->dev_private;
3449         rte_free(dev->security_ctx);
3450
3451         rte_free(internals);
3452
3453         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3454                       dev->data->name, rte_socket_id());
3455
3456         return 0;
3457 }
3458
3459 static int
3460 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3461 {
3462         struct dpaa_sec_dev_private *internals;
3463 #ifdef RTE_LIB_SECURITY
3464         struct rte_security_ctx *security_instance;
3465 #endif
3466         struct dpaa_sec_qp *qp;
3467         uint32_t i, flags;
3468         int ret;
3469
3470         PMD_INIT_FUNC_TRACE();
3471
3472         cryptodev->driver_id = dpaa_cryptodev_driver_id;
3473         cryptodev->dev_ops = &crypto_ops;
3474
3475         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3476         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3477         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3478                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3479                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3480                         RTE_CRYPTODEV_FF_SECURITY |
3481                         RTE_CRYPTODEV_FF_SYM_RAW_DP |
3482                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3483                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3484                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3485                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3486                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3487
3488         internals = cryptodev->data->dev_private;
3489         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3490         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3491
        /*
         * For secondary processes, don't initialise any further; the
         * primary process has already done this work.
         */
3497         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DPAA_SEC_WARN("Device already initialized by primary process");
3499                 return 0;
3500         }
3501 #ifdef RTE_LIB_SECURITY
3502         /* Initialize security_ctx only for primary process*/
3503         security_instance = rte_malloc("rte_security_instances_ops",
3504                                 sizeof(struct rte_security_ctx), 0);
3505         if (security_instance == NULL)
3506                 return -ENOMEM;
3507         security_instance->device = (void *)cryptodev;
3508         security_instance->ops = &dpaa_sec_security_ops;
3509         security_instance->sess_cnt = 0;
3510         cryptodev->security_ctx = security_instance;
3511 #endif
3512         rte_spinlock_init(&internals->lock);
3513         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3514                 /* init qman fq for queue pair */
3515                 qp = &internals->qps[i];
3516                 ret = dpaa_sec_init_tx(&qp->outq);
3517                 if (ret) {
                        DPAA_SEC_ERR("failed to configure tx queue of queue pair %d", i);
3519                         goto init_error;
3520                 }
3521         }
3522
3523         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3524                 QMAN_FQ_FLAG_TO_DCPORTAL;
3525         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3526                 /* create rx qman fq for sessions*/
3527                 ret = qman_create_fq(0, flags, &internals->inq[i]);
3528                 if (unlikely(ret != 0)) {
3529                         DPAA_SEC_ERR("sec qman_create_fq failed");
3530                         goto init_error;
3531                 }
3532         }
3533
3534         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3535         return 0;
3536
3537 init_error:
        DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3539
3540         rte_free(cryptodev->security_ctx);
3541         return -EFAULT;
3542 }
3543
3544 static int
3545 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3546                                 struct rte_dpaa_device *dpaa_dev)
3547 {
3548         struct rte_cryptodev *cryptodev;
3549         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3550
3551         int retval;
3552
3553         snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3554
3555         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3556         if (cryptodev == NULL)
3557                 return -ENOMEM;
3558
3559         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3560                 cryptodev->data->dev_private = rte_zmalloc_socket(
3561                                         "cryptodev private structure",
3562                                         sizeof(struct dpaa_sec_dev_private),
3563                                         RTE_CACHE_LINE_SIZE,
3564                                         rte_socket_id());
3565
3566                 if (cryptodev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memory for private "
                                        "device data");
3569         }
3570
3571         dpaa_dev->crypto_dev = cryptodev;
3572         cryptodev->device = &dpaa_dev->device;
3573
3574         /* init user callbacks */
3575         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3576
        /* If the SEC era is not already set, read it from the device tree */
3578         if (!rta_get_sec_era()) {
3579                 const struct device_node *caam_node;
3580
3581                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3582                         const uint32_t *prop = of_get_property(caam_node,
3583                                         "fsl,sec-era",
3584                                         NULL);
3585                         if (prop) {
3586                                 rta_set_sec_era(
3587                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3588                                 break;
3589                         }
3590                 }
3591         }
3592
3593         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3594                 retval = rte_dpaa_portal_init((void *)1);
3595                 if (retval) {
3596                         DPAA_SEC_ERR("Unable to initialize portal");
3597                         goto out;
3598                 }
3599         }
3600
3601         /* Invoke PMD device initialization function */
3602         retval = dpaa_sec_dev_init(cryptodev);
3603         if (retval == 0) {
3604                 rte_cryptodev_pmd_probing_finish(cryptodev);
3605                 return 0;
3606         }
3607
3608         retval = -ENXIO;
3609 out:
        /* Clean up on error */
3611         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3612                 rte_free(cryptodev->data->dev_private);
3613
3614         rte_cryptodev_pmd_release_device(cryptodev);
3615
3616         return retval;
3617 }
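
/*
 * Probe flow, for reference: allocate the cryptodev, resolve the SEC
 * era from the "fsl,sec-era" device-tree property if caamflib has not
 * already been given one, make sure the calling thread owns a QMan
 * portal, then run dpaa_sec_dev_init(); any failure releases the
 * device again.
 */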
3618
3619 static int
3620 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3621 {
3622         struct rte_cryptodev *cryptodev;
3623         int ret;
3624
3625         cryptodev = dpaa_dev->crypto_dev;
3626         if (cryptodev == NULL)
3627                 return -ENODEV;
3628
3629         ret = dpaa_sec_uninit(cryptodev);
3630         if (ret)
3631                 return ret;
3632
3633         return rte_cryptodev_pmd_destroy(cryptodev);
3634 }
3635
3636 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3637         .drv_type = FSL_DPAA_CRYPTO,
3638         .driver = {
3639                 .name = "DPAA SEC PMD"
3640         },
3641         .probe = cryptodev_dpaa_sec_probe,
3642         .remove = cryptodev_dpaa_sec_remove,
3643 };
3644
3645 static struct cryptodev_driver dpaa_sec_crypto_drv;
3646
3647 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3648 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3649                 dpaa_cryptodev_driver_id);
3650 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);