drivers/crypto/dpaa_sec/dpaa_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2022 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_io.h>
#include <rte_ip.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

#define DRIVER_DUMP_MODE "drv_dump_mode"

/* DPAA_SEC_DP_DUMP levels */
enum dpaa_sec_dump_levels {
        DPAA_SEC_DP_NO_DUMP,
        DPAA_SEC_DP_ERR_DUMP,
        DPAA_SEC_DP_FULL_DUMP
};

uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
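
/*
 * Sketch of how the dump level gates the error path (this mirrors the
 * dequeue code in dpaa_sec_deq() below); "drv_dump_mode" is expected to
 * be parsed from devargs elsewhere in this driver:
 *
 *      if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
 *              DPAA_SEC_DP_WARN("SEC return err:0x%x\n", ctx->fd_status);
 *              if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
 *                      dpaa_sec_dump(ctx, qp);
 *      }
 */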

uint8_t dpaa_cryptodev_driver_id;

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
        struct dpaa_sec_op_ctx *ctx;
        int i, retval;

        retval = rte_mempool_get(
                        ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
                        (void **)(&ctx));
        if (!ctx || retval) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear the SG memory. There are 16 SG entries of 16 bytes each;
         * one call to dcbz_64() clears 64 bytes (four entries), so the
         * loop below covers all requested entries. dpaa_sec_alloc_ctx()
         * is called for each packet, and memset() is costlier than
         * dcbz_64().
         */
        for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
                dcbz_64(&ctx->job.sg[i]);

        ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
        ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

        return ctx;
}
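
/*
 * Typical usage of dpaa_sec_alloc_ctx() (a minimal sketch, mirroring the
 * build_* helpers below): allocate a ctx sized for the SG entries needed,
 * then fill sg[0] (output) and sg[1] (input) of the compound frame:
 *
 *      ctx = dpaa_sec_alloc_ctx(ses, 4);
 *      if (!ctx)
 *              return NULL;
 *      cf = &ctx->job;
 *      ctx->op = op;
 *      ... fill cf->sg[0] (output) and cf->sg[1] (input) ...
 */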

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
                        fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with CAAM as the destination channel so that
 * all packets enqueued on it are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                DPAA_SEC_ERR("qman_init_fq failed %d", ret);

        return ret;
}
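
/*
 * Example call (a sketch only; the actual call site lives in the
 * session/queue attach code, outside this excerpt): bind a session
 * in-FQ to a queue pair's out-FQ, with the session CDB as the hardware
 * descriptor context:
 *
 *      ret = dpaa_sec_init_rx(fq_in, rte_dpaa_mem_vtop(&ses->cdb),
 *                             qp->outq.fqid);
 */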

/* Frames enqueued on in_fq are processed by CAAM, which places the
 * crypto result on out_fq; this callback consumes those results.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* sg is embedded in an op ctx:
         * sg[0] is for output,
         * sg[1] is for input.
         */
        job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;
                struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
                                ctx->op->sym->m_src : ctx->op->sym->m_dst;

                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                mbuf->pkt_len = len;
                while (mbuf->next != NULL) {
                        len -= mbuf->data_len;
                        mbuf = mbuf->next;
                }
                mbuf->data_len = len;
        }
        DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}
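
/*
 * Worked example of the length fix-up above: if the SEC output is
 * 1500 bytes and the destination is a chain of three segments with
 * data_len 600/600/600, pkt_len becomes 1500 and the last segment's
 * data_len is trimmed to 1500 - 600 - 600 = 300.
 */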

/* The CAAM result is put into this queue. */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("unable to init caam source fq!");
                return ret;
        }

        return ret;
}
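
/*
 * init_tx/init_rx pairing (sketch of the flow): each queue pair creates
 * one result FQ via dpaa_sec_init_tx(); session in-FQs are then bound to
 * it through dpaa_sec_init_rx(), which stores the out-FQID in context_b
 * so CAAM knows where to enqueue the results.
 */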

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
        struct alginfo authdata = {0}, cipherdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        struct alginfo *p_authdata = NULL;
        int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        if (ses->auth_alg) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;

                p_authdata = &authdata;
        }

        if (ses->pdcp.sdap_enabled) {
                int nb_keys_to_inline =
                                rta_inline_pdcp_sdap_query(authdata.algtype,
                                        cipherdata.algtype,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn_ovd);
                if (nb_keys_to_inline >= 1) {
                        cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
                                                (size_t)cipherdata.key);
                        cipherdata.key_type = RTA_DATA_PTR;
                }
                if (nb_keys_to_inline >= 2) {
                        authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
                                                (size_t)authdata.key);
                        authdata.key_type = RTA_DATA_PTR;
                }
        } else {
                if (rta_inline_pdcp_query(authdata.algtype,
                                        cipherdata.algtype,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn_ovd)) {
                        cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
                                                (size_t)cipherdata.key);
                        cipherdata.key_type = RTA_DATA_PTR;
                }
        }

        if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata);
        } else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
                shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
                                                     1, swap, &authdata);
        } else {
                if (ses->dir == DIR_ENC) {
                        if (ses->pdcp.sdap_enabled)
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_sdap_u_plane_encap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata);
                        else
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_u_plane_encap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata);
                } else if (ses->dir == DIR_DEC) {
                        if (ses->pdcp.sdap_enabled)
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_sdap_u_plane_decap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata);
                        else
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_u_plane_decap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata);
                }
        }
        return shared_desc_len;
}
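
/*
 * Note on the key handling above (a sketch of intent, inferred from the
 * conversions performed): the rta_inline_pdcp*_query() helpers decide
 * how many keys must be referenced from memory rather than carried
 * immediately in the shared descriptor; such keys are converted to
 * RTA_DATA_PTR and passed by physical address so the descriptor stays
 * within the SEC size limit.
 */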

/* Prepare the IPsec-proto command block of the session. */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
        struct alginfo cipherdata = {0}, authdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        if (ses->auth_key.length) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;
        }

        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = authdata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               DESC_JOB_IO_LEN,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);

        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }
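        /*
         * rta_inline_query() reports, via bits in sh_desc[2], which keys
         * fit immediately in the shared descriptor: bit 0 for the cipher
         * key, bit 1 for the auth key. Keys that do not fit are passed
         * by physical address (RTA_DATA_PTR) below.
         */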
        if (cdb->sh_desc[2] & 1)
                cipherdata.key_type = RTA_DATA_IMM;
        else {
                cipherdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (cdb->sh_desc[2] & (1 << 1))
                authdata.key_type = RTA_DATA_IMM;
        else {
                authdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        if (ses->dir == DIR_ENC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->encap_pdb,
                                (uint8_t *)&ses->ip4_hdr,
                                &cipherdata, &authdata);
        } else if (ses->dir == DIR_DEC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->decap_pdb,
                                &cipherdata, &authdata);
        }
        return shared_desc_len;
}
#endif
/* Prepare the command block of the session. */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        int32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
        case DPAA_SEC_IPSEC:
                shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
                break;
        case DPAA_SEC_PDCP:
                shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
                break;
#endif
        case DPAA_SEC_CIPHER:
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                alginfo_c.algtype = ses->cipher_key.alg;
                alginfo_c.algmode = ses->cipher_key.algmode;

                switch (ses->cipher_alg) {
                case RTE_CRYPTO_CIPHER_AES_CBC:
                case RTE_CRYPTO_CIPHER_3DES_CBC:
                case RTE_CRYPTO_CIPHER_DES_CBC:
                case RTE_CRYPTO_CIPHER_AES_CTR:
                case RTE_CRYPTO_CIPHER_3DES_CTR:
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                        shared_desc_len = cnstr_shdsc_snow_f8(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                        shared_desc_len = cnstr_shdsc_zuce(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported cipher alg %d",
                                     ses->cipher_alg);
                        return -ENOTSUP;
                }
                break;
        case DPAA_SEC_AUTH:
                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                alginfo_a.algtype = ses->auth_key.alg;
                alginfo_a.algmode = ses->auth_key.algmode;
                switch (ses->auth_alg) {
                case RTE_CRYPTO_AUTH_MD5:
                case RTE_CRYPTO_AUTH_SHA1:
                case RTE_CRYPTO_AUTH_SHA224:
                case RTE_CRYPTO_AUTH_SHA256:
                case RTE_CRYPTO_AUTH_SHA384:
                case RTE_CRYPTO_AUTH_SHA512:
                        shared_desc_len = cnstr_shdsc_hash(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_MD5_HMAC:
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                case RTE_CRYPTO_AUTH_SHA224_HMAC:
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
                        shared_desc_len = cnstr_shdsc_snow_f9(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_ZUC_EIA3:
                        shared_desc_len = cnstr_shdsc_zuca(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
                case RTE_CRYPTO_AUTH_AES_CMAC:
                        shared_desc_len = cnstr_shdsc_aes_mac(
                                                cdb->sh_desc,
                                                true, swap, SHR_NEVER,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
                        return -ENOTSUP;
                }
                break;
        case DPAA_SEC_AEAD:
                alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;
                alginfo.algtype = ses->aead_key.alg;
                alginfo.algmode = ses->aead_key.algmode;

                /* Validate the algorithm after alginfo has been filled
                 * in from the session.
                 */
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported aead alg");
                        return -ENOTSUP;
                }

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                break;
        case DPAA_SEC_CIPHER_HASH:
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                alginfo_c.algtype = ses->cipher_key.alg;
                alginfo_c.algmode = ses->cipher_key.algmode;

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                alginfo_a.algtype = ses->auth_key.alg;
                alginfo_a.algmode = ses->auth_key.algmode;

                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       DESC_JOB_IO_LEN,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1 << 1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
                /* auth_only_len is set to 0 here; it is overwritten in
                 * the FD for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
                                ses->iv.length,
                                ses->digest_length, ses->dir);
                break;
        case DPAA_SEC_HASH_CIPHER:
        default:
                DPAA_SEC_ERR("error: Unsupported session");
                return -ENOTSUP;
        }

        if (shared_desc_len < 0) {
                DPAA_SEC_ERR("error in preparing command block");
                return shared_desc_len;
        }

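        /*
         * Record the shared-descriptor length (in 4-byte words) in the
         * header and convert the header words to the big-endian layout
         * expected by the SEC block.
         */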
        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

static void
dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
{
        struct dpaa_sec_job *job = &ctx->job;
        struct rte_crypto_op *op = ctx->op;
        dpaa_sec_session *sess = NULL;
        struct sec_cdb c_cdb, *cdb;
        uint8_t bufsize;
        struct rte_crypto_sym_op *sym_op;
        struct qm_sg_entry sg[2];

        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                sess = (dpaa_sec_session *)
                        get_sym_session_private_data(
                                        op->sym->session,
                                        dpaa_cryptodev_driver_id);
#ifdef RTE_LIB_SECURITY
        else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                sess = (dpaa_sec_session *)
                        get_sec_session_private_data(
                                        op->sym->sec_session);
#endif
        if (sess == NULL) {
                printf("session is NULL\n");
                goto mbuf_dump;
        }

        cdb = &sess->cdb;
        rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
#ifdef RTE_LIB_SECURITY
        printf("\nsession protocol type = %d\n", sess->proto_alg);
#endif
        printf("\n****************************************\n"
                "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
                "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
                "\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
                "\tCipher algmode:\t%d\n", sess->ctxt,
                (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
                sess->cipher_alg, sess->auth_alg, sess->aead_alg,
                (uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
                sess->cipher_key.algmode);
        rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
                        sess->cipher_key.length);
        rte_hexdump(stdout, "auth key", sess->auth_key.data,
                        sess->auth_key.length);
        printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
                "\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
                "\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
                "\taead cipher text:\t%d\n",
                (uint64_t)sess->auth_key.length, sess->auth_key.alg,
                sess->auth_key.algmode,
                sess->iv.length, sess->iv.offset,
                sess->digest_length, sess->auth_only_len,
                sess->auth_cipher_text);
#ifdef RTE_LIB_SECURITY
        printf("PDCP session params:\n"
                "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
                "\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
                "\t%d\n\thfn:\t\t%d\n"
                "\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
                sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
                sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
                sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
                sess->pdcp.hfn_threshold);
#endif
        c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
        c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
        bufsize = c_cdb.sh_hdr.hi.field.idlen;

        printf("cdb = %p\n\n", cdb);
        printf("Descriptor size = %d\n", bufsize);
        int m;
        for (m = 0; m < bufsize; m++)
                printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));

        printf("\n");
mbuf_dump:
        sym_op = op->sym;
        if (sym_op->m_src) {
                printf("Source mbuf:\n");
                rte_pktmbuf_dump(stdout, sym_op->m_src,
                                 sym_op->m_src->data_len);
        }
        if (sym_op->m_dst) {
                printf("Destination mbuf:\n");
                rte_pktmbuf_dump(stdout, sym_op->m_dst,
                                 sym_op->m_dst->data_len);
        }

        printf("Session address = %p\ncipher offset: %d, length: %d\n"
                "auth offset: %d, length: %d\naead offset: %d, length: %d\n",
                sym_op->session, sym_op->cipher.data.offset,
                sym_op->cipher.data.length,
                sym_op->auth.data.offset, sym_op->auth.data.length,
                sym_op->aead.data.offset, sym_op->aead.data.length);
        printf("\n");

        printf("******************************************************\n");
        printf("ctx info:\n");
        printf("job->sg[0] output info:\n");
        memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
        printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
                "\n\tbpid = %d\n\toffset = %d\n",
                (uint64_t)sg[0].addr, sg[0].length, sg[0].final,
                sg[0].extension, sg[0].bpid, sg[0].offset);
        printf("\njob->sg[1] input info:\n");
        memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
        hw_sg_to_cpu(&sg[1]);
        printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
                "\n\tbpid = %d\n\toffset = %d\n",
                (uint64_t)sg[1].addr, sg[1].length, sg[1].final,
                sg[1].extension, sg[1].bpid, sg[1].offset);

        printf("\nctx pool addr = %p\n", ctx->ctx_pool);
        if (ctx->ctx_pool)
                printf("ctx pool available count = %d\n",
                        rte_mempool_avail_count(ctx->ctx_pool));

        printf("\nop pool addr = %p\n", op->mempool);
        if (op->mempool)
                printf("op pool available count = %d\n",
                        rte_mempool_avail_count(op->mempool));

        printf("********************************************************\n");
        printf("Queue data:\n");
        printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
                "\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts = %d\n"
                "\trx_errs = %d\n\ttx_errs = %d\n\n",
                qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
                qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
                qp->rx_errs, qp->tx_errs);
}

/* The qp is lockless; it must be accessed by only one thread. */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers, set QM_VDQCR_EXACT so
         * that exactly the requested number is provided. Without the
         * flag, the dequeue may return up to two more buffers than
         * requested, so in that case request two fewer.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
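        /*
         * Example: nb_ops = 32 requests 30 frames without QM_VDQCR_EXACT,
         * so the portal may legally return up to 32; nb_ops = 3 requests
         * exactly 3 with QM_VDQCR_EXACT set.
         */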
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* sg is embedded in an op ctx:
                 * sg[0] is for output,
                 * sg[1] is for input.
                 */
                job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;
                        struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
                                                op->sym->m_src : op->sym->m_dst;

                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        mbuf->pkt_len = len;
                        while (mbuf->next != NULL) {
                                len -= mbuf->data_len;
                                mbuf = mbuf->next;
                        }
                        mbuf->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
                                DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
                                                  ctx->fd_status);
                                if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
                                        dpaa_sec_dump(ctx, qp);
                        }
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* The op status has been reported; free the ctx back to
                 * its pool.
                 */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->offset = data_offset;

        if (data_len <= (mbuf->data_len - data_offset)) {
                sg->length = data_len;
        } else {
                sg->length = mbuf->data_len - data_offset;

                /* remaining i/p segs */
                while ((data_len = data_len - sg->length) &&
                       (mbuf = mbuf->next)) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                        if (data_len > mbuf->data_len)
                                sg->length = mbuf->data_len;
                        else
                                sg->length = data_len;
                }
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                start_addr = rte_dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *in_sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        sg = &cf->sg[2];

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->offset = data_offset;
        sg->length = data_len;

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                /* save the received digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                /* then let the hardware verify it */
                start_addr = rte_dpaa_mem_vtop(old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }
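        /*
         * SG accounting (sketch): two compound-frame entries (sg[0] out,
         * sg[1] in) plus one IV entry, then one entry per output segment
         * and one per input segment; hence nb_segs * 2 + 3 for the
         * in-place case.
         */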
        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = data_len;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + data_offset);
        sg->length = data_len + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = data_len + ses->iv.length;
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + data_offset);
        sg->length = data_len;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}
1286
1287 static inline struct dpaa_sec_job *
1288 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1289 {
1290         struct rte_crypto_sym_op *sym = op->sym;
1291         struct dpaa_sec_job *cf;
1292         struct dpaa_sec_op_ctx *ctx;
1293         struct qm_sg_entry *sg, *out_sg, *in_sg;
1294         struct rte_mbuf *mbuf;
1295         uint8_t req_segs;
1296         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1297                         ses->iv.offset);
1298
1299         if (sym->m_dst) {
1300                 mbuf = sym->m_dst;
1301                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1302         } else {
1303                 mbuf = sym->m_src;
1304                 req_segs = mbuf->nb_segs * 2 + 4;
1305         }
1306
1307         if (ses->auth_only_len)
1308                 req_segs++;
1309
1310         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1311                 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1312                                 MAX_SG_ENTRIES);
1313                 return NULL;
1314         }
1315
1316         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1317         if (!ctx)
1318                 return NULL;
1319
1320         cf = &ctx->job;
1321         ctx->op = op;
1322
1323         rte_prefetch0(cf->sg);
1324
1325         /* output */
1326         out_sg = &cf->sg[0];
1327         out_sg->extension = 1;
1328         if (is_encode(ses))
1329                 out_sg->length = sym->aead.data.length + ses->digest_length;
1330         else
1331                 out_sg->length = sym->aead.data.length;
1332
1333         /* output sg entries */
1334         sg = &cf->sg[2];
1335         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1336         cpu_to_hw_sg(out_sg);
1337
1338         /* 1st seg */
1339         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1340         sg->length = mbuf->data_len - sym->aead.data.offset;
1341         sg->offset = sym->aead.data.offset;
1342
1343         /* Successive segs */
1344         mbuf = mbuf->next;
1345         while (mbuf) {
1346                 cpu_to_hw_sg(sg);
1347                 sg++;
1348                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1349                 sg->length = mbuf->data_len;
1350                 mbuf = mbuf->next;
1351         }
1352         sg->length -= ses->digest_length;
1353
1354         if (is_encode(ses)) {
1355                 cpu_to_hw_sg(sg);
1356                 /* set auth output */
1357                 sg++;
1358                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1359                 sg->length = ses->digest_length;
1360         }
1361         sg->final = 1;
1362         cpu_to_hw_sg(sg);
1363
1364         /* input */
1365         mbuf = sym->m_src;
1366         in_sg = &cf->sg[1];
1367         in_sg->extension = 1;
1368         in_sg->final = 1;
1369         if (is_encode(ses))
1370                 in_sg->length = ses->iv.length + sym->aead.data.length
1371                                                         + ses->auth_only_len;
1372         else
1373                 in_sg->length = ses->iv.length + sym->aead.data.length
1374                                 + ses->auth_only_len + ses->digest_length;
1375
1376         /* input sg entries */
1377         sg++;
1378         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1379         cpu_to_hw_sg(in_sg);
1380
1381         /* 1st seg IV */
1382         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1383         sg->length = ses->iv.length;
1384         cpu_to_hw_sg(sg);
1385
1386         /* 2nd seg auth only */
1387         if (ses->auth_only_len) {
1388                 sg++;
1389                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1390                 sg->length = ses->auth_only_len;
1391                 cpu_to_hw_sg(sg);
1392         }
1393
1394         /* 3rd seg */
1395         sg++;
1396         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1397         sg->length = mbuf->data_len - sym->aead.data.offset;
1398         sg->offset = sym->aead.data.offset;
1399
1400         /* Successive segs */
1401         mbuf = mbuf->next;
1402         while (mbuf) {
1403                 cpu_to_hw_sg(sg);
1404                 sg++;
1405                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1406                 sg->length = mbuf->data_len;
1407                 mbuf = mbuf->next;
1408         }
1409
1410         if (is_decode(ses)) {
1411                 cpu_to_hw_sg(sg);
1412                 sg++;
1413                 memcpy(ctx->digest, sym->aead.digest.data,
1414                         ses->digest_length);
1415                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1416                 sg->length = ses->digest_length;
1417         }
1418         sg->final = 1;
1419         cpu_to_hw_sg(sg);
1420
1421         return cf;
1422 }
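
/*
 * Segment accounting sketch for build_cipher_auth_gcm_sg() above (a
 * reading of the code, not a normative formula): the "+ 4" reserves
 * the two compound-frame entries plus the IV entry on the input side
 * and the digest entry (appended to the output on encode, to the
 * input on decode); one more entry is needed when AAD (auth_only_len)
 * is present. E.g. a 3-segment in-place mbuf with AAD needs
 * 3 * 2 + 4 + 1 = 11 entries.
 */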
1423
1424 static inline struct dpaa_sec_job *
1425 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1426 {
1427         struct rte_crypto_sym_op *sym = op->sym;
1428         struct dpaa_sec_job *cf;
1429         struct dpaa_sec_op_ctx *ctx;
1430         struct qm_sg_entry *sg;
1431         uint32_t length = 0;
1432         rte_iova_t src_start_addr, dst_start_addr;
1433         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1434                         ses->iv.offset);
1435
1436         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1437
1438         if (sym->m_dst)
1439                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1440         else
1441                 dst_start_addr = src_start_addr;
1442
1443         ctx = dpaa_sec_alloc_ctx(ses, 7);
1444         if (!ctx)
1445                 return NULL;
1446
1447         cf = &ctx->job;
1448         ctx->op = op;
1449
1450         /* input */
1451         rte_prefetch0(cf->sg);
1452         sg = &cf->sg[2];
1453         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1454         if (is_encode(ses)) {
1455                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1456                 sg->length = ses->iv.length;
1457                 length += sg->length;
1458                 cpu_to_hw_sg(sg);
1459
1460                 sg++;
1461                 if (ses->auth_only_len) {
1462                         qm_sg_entry_set64(sg,
1463                                           rte_dpaa_mem_vtop(sym->aead.aad.data));
1464                         sg->length = ses->auth_only_len;
1465                         length += sg->length;
1466                         cpu_to_hw_sg(sg);
1467                         sg++;
1468                 }
1469                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1470                 sg->length = sym->aead.data.length;
1471                 length += sg->length;
1472                 sg->final = 1;
1473                 cpu_to_hw_sg(sg);
1474         } else {
1475                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1476                 sg->length = ses->iv.length;
1477                 length += sg->length;
1478                 cpu_to_hw_sg(sg);
1479
1480                 sg++;
1481                 if (ses->auth_only_len) {
1482                         qm_sg_entry_set64(sg,
1483                                           rte_dpaa_mem_vtop(sym->aead.aad.data));
1484                         sg->length = ses->auth_only_len;
1485                         length += sg->length;
1486                         cpu_to_hw_sg(sg);
1487                         sg++;
1488                 }
1489                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1490                 sg->length = sym->aead.data.length;
1491                 length += sg->length;
1492                 cpu_to_hw_sg(sg);
1493
1494                 memcpy(ctx->digest, sym->aead.digest.data,
1495                        ses->digest_length);
1496                 sg++;
1497
1498                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1499                 sg->length = ses->digest_length;
1500                 length += sg->length;
1501                 sg->final = 1;
1502                 cpu_to_hw_sg(sg);
1503         }
1504         /* input compound frame */
1505         cf->sg[1].length = length;
1506         cf->sg[1].extension = 1;
1507         cf->sg[1].final = 1;
1508         cpu_to_hw_sg(&cf->sg[1]);
1509
1510         /* output */
1511         sg++;
1512         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1513         qm_sg_entry_set64(sg,
1514                 dst_start_addr + sym->aead.data.offset);
1515         sg->length = sym->aead.data.length;
1516         length = sg->length;
1517         if (is_encode(ses)) {
1518                 cpu_to_hw_sg(sg);
1519                 /* set auth output */
1520                 sg++;
1521                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1522                 sg->length = ses->digest_length;
1523                 length += sg->length;
1524         }
1525         sg->final = 1;
1526         cpu_to_hw_sg(sg);
1527
1528         /* output compound frame */
1529         cf->sg[0].length = length;
1530         cf->sg[0].extension = 1;
1531         cpu_to_hw_sg(&cf->sg[0]);
1532
1533         return cf;
1534 }
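
/*
 * Caller-side sketch (illustrative; iv_offset, buffers and lengths
 * are application placeholders) of where build_cipher_auth_gcm()
 * expects its inputs: the IV in the op private area at
 * ses->iv.offset, the AAD at sym->aead.aad.data and the digest at
 * sym->aead.digest:
 *
 *	uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);
 *	memcpy(iv, nonce, 12);
 *	op->sym->aead.data.offset = hdr_len;
 *	op->sym->aead.data.length = payload_len;
 *	op->sym->aead.aad.data = aad_buf;
 *	op->sym->aead.digest.data = tag_va;
 *	op->sym->aead.digest.phys_addr = tag_iova;
 */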
1535
1536 static inline struct dpaa_sec_job *
1537 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1538 {
1539         struct rte_crypto_sym_op *sym = op->sym;
1540         struct dpaa_sec_job *cf;
1541         struct dpaa_sec_op_ctx *ctx;
1542         struct qm_sg_entry *sg, *out_sg, *in_sg;
1543         struct rte_mbuf *mbuf;
1544         uint8_t req_segs;
1545         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1546                         ses->iv.offset);
1547
1548         if (sym->m_dst) {
1549                 mbuf = sym->m_dst;
1550                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1551         } else {
1552                 mbuf = sym->m_src;
1553                 req_segs = mbuf->nb_segs * 2 + 4;
1554         }
1555
1556         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1557                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1558                                 MAX_SG_ENTRIES);
1559                 return NULL;
1560         }
1561
1562         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1563         if (!ctx)
1564                 return NULL;
1565
1566         cf = &ctx->job;
1567         ctx->op = op;
1568
1569         rte_prefetch0(cf->sg);
1570
1571         /* output */
1572         out_sg = &cf->sg[0];
1573         out_sg->extension = 1;
1574         if (is_encode(ses))
1575                 out_sg->length = sym->auth.data.length + ses->digest_length;
1576         else
1577                 out_sg->length = sym->auth.data.length;
1578
1579         /* output sg entries */
1580         sg = &cf->sg[2];
1581         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1582         cpu_to_hw_sg(out_sg);
1583
1584         /* 1st seg */
1585         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1586         sg->length = mbuf->data_len - sym->auth.data.offset;
1587         sg->offset = sym->auth.data.offset;
1588
1589         /* Successive segs */
1590         mbuf = mbuf->next;
1591         while (mbuf) {
1592                 cpu_to_hw_sg(sg);
1593                 sg++;
1594                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1595                 sg->length = mbuf->data_len;
1596                 mbuf = mbuf->next;
1597         }
1598         sg->length -= ses->digest_length;
1599
1600         if (is_encode(ses)) {
1601                 cpu_to_hw_sg(sg);
1602                 /* set auth output */
1603                 sg++;
1604                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1605                 sg->length = ses->digest_length;
1606         }
1607         sg->final = 1;
1608         cpu_to_hw_sg(sg);
1609
1610         /* input */
1611         mbuf = sym->m_src;
1612         in_sg = &cf->sg[1];
1613         in_sg->extension = 1;
1614         in_sg->final = 1;
1615         if (is_encode(ses))
1616                 in_sg->length = ses->iv.length + sym->auth.data.length;
1617         else
1618                 in_sg->length = ses->iv.length + sym->auth.data.length
1619                                                 + ses->digest_length;
1620
1621         /* input sg entries */
1622         sg++;
1623         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1624         cpu_to_hw_sg(in_sg);
1625
1626         /* 1st seg IV */
1627         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1628         sg->length = ses->iv.length;
1629         cpu_to_hw_sg(sg);
1630
1631         /* 2nd seg */
1632         sg++;
1633         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1634         sg->length = mbuf->data_len - sym->auth.data.offset;
1635         sg->offset = sym->auth.data.offset;
1636
1637         /* Successive segs */
1638         mbuf = mbuf->next;
1639         while (mbuf) {
1640                 cpu_to_hw_sg(sg);
1641                 sg++;
1642                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1643                 sg->length = mbuf->data_len;
1644                 mbuf = mbuf->next;
1645         }
1646
1647         sg->length -= ses->digest_length;
1648         if (is_decode(ses)) {
1649                 cpu_to_hw_sg(sg);
1650                 sg++;
1651                 memcpy(ctx->digest, sym->auth.digest.data,
1652                         ses->digest_length);
1653                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1654                 sg->length = ses->digest_length;
1655         }
1656         sg->final = 1;
1657         cpu_to_hw_sg(sg);
1658
1659         return cf;
1660 }
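
/*
 * Worked example for build_cipher_auth_sg() above (illustrative
 * numbers): with a 16 B IV, 64 B of auth data and a 12 B digest,
 * encode uses in = 16 + 64 = 80 B and out = 64 + 12 = 76 B; decode
 * moves the digest to the input side: in = 16 + 64 + 12 = 92 B,
 * out = 64 B.
 */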
1661
1662 static inline struct dpaa_sec_job *
1663 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1664 {
1665         struct rte_crypto_sym_op *sym = op->sym;
1666         struct dpaa_sec_job *cf;
1667         struct dpaa_sec_op_ctx *ctx;
1668         struct qm_sg_entry *sg;
1669         rte_iova_t src_start_addr, dst_start_addr;
1670         uint32_t length = 0;
1671         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1672                         ses->iv.offset);
1673
1674         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1675         if (sym->m_dst)
1676                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1677         else
1678                 dst_start_addr = src_start_addr;
1679
1680         ctx = dpaa_sec_alloc_ctx(ses, 7);
1681         if (!ctx)
1682                 return NULL;
1683
1684         cf = &ctx->job;
1685         ctx->op = op;
1686
1687         /* input */
1688         rte_prefetch0(cf->sg);
1689         sg = &cf->sg[2];
1690         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1691         if (is_encode(ses)) {
1692                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1693                 sg->length = ses->iv.length;
1694                 length += sg->length;
1695                 cpu_to_hw_sg(sg);
1696
1697                 sg++;
1698                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1699                 sg->length = sym->auth.data.length;
1700                 length += sg->length;
1701                 sg->final = 1;
1702                 cpu_to_hw_sg(sg);
1703         } else {
1704                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1705                 sg->length = ses->iv.length;
1706                 length += sg->length;
1707                 cpu_to_hw_sg(sg);
1708
1709                 sg++;
1710
1711                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1712                 sg->length = sym->auth.data.length;
1713                 length += sg->length;
1714                 cpu_to_hw_sg(sg);
1715
1716                 memcpy(ctx->digest, sym->auth.digest.data,
1717                        ses->digest_length);
1718                 sg++;
1719
1720                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1721                 sg->length = ses->digest_length;
1722                 length += sg->length;
1723                 sg->final = 1;
1724                 cpu_to_hw_sg(sg);
1725         }
1726         /* input compound frame */
1727         cf->sg[1].length = length;
1728         cf->sg[1].extension = 1;
1729         cf->sg[1].final = 1;
1730         cpu_to_hw_sg(&cf->sg[1]);
1731
1732         /* output */
1733         sg++;
1734         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1735         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1736         sg->length = sym->cipher.data.length;
1737         length = sg->length;
1738         if (is_encode(ses)) {
1739                 cpu_to_hw_sg(sg);
1740                 /* set auth output */
1741                 sg++;
1742                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1743                 sg->length = ses->digest_length;
1744                 length += sg->length;
1745         }
1746         sg->final = 1;
1747         cpu_to_hw_sg(sg);
1748
1749         /* output compound frame */
1750         cf->sg[0].length = length;
1751         cf->sg[0].extension = 1;
1752         cpu_to_hw_sg(&cf->sg[0]);
1753
1754         return cf;
1755 }
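
/*
 * Offset relationship assumed by build_cipher_auth() (see the DPOVRD
 * handling in dpaa_sec_enqueue_burst() below): auth.data must enclose
 * cipher.data. E.g. auth at offset 0, length 80 with cipher at offset
 * 16, length 48 gives auth_hdr_len = 16 and auth_tail_len =
 * 80 - 48 - 16 = 16.
 */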
1756
1757 #ifdef RTE_LIB_SECURITY
1758 static inline struct dpaa_sec_job *
1759 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1760 {
1761         struct rte_crypto_sym_op *sym = op->sym;
1762         struct dpaa_sec_job *cf;
1763         struct dpaa_sec_op_ctx *ctx;
1764         struct qm_sg_entry *sg;
1765         phys_addr_t src_start_addr, dst_start_addr;
1766
1767         ctx = dpaa_sec_alloc_ctx(ses, 2);
1768         if (!ctx)
1769                 return NULL;
1770         cf = &ctx->job;
1771         ctx->op = op;
1772
1773         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1774
1775         if (sym->m_dst)
1776                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1777         else
1778                 dst_start_addr = src_start_addr;
1779
1780         /* input */
1781         sg = &cf->sg[1];
1782         qm_sg_entry_set64(sg, src_start_addr);
1783         sg->length = sym->m_src->pkt_len;
1784         sg->final = 1;
1785         cpu_to_hw_sg(sg);
1786
1787         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1788         /* output */
1789         sg = &cf->sg[0];
1790         qm_sg_entry_set64(sg, dst_start_addr);
1791         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1792         cpu_to_hw_sg(sg);
1793
1794         return cf;
1795 }
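
/*
 * Note: the output entry above spans the whole remaining buffer
 * (buf_len - data_off) rather than pkt_len, presumably so that SEC
 * can grow the frame during protocol encapsulation (IPsec tunnel
 * headers, PDCP headers).
 */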
1796
1797 static inline struct dpaa_sec_job *
1798 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1799 {
1800         struct rte_crypto_sym_op *sym = op->sym;
1801         struct dpaa_sec_job *cf;
1802         struct dpaa_sec_op_ctx *ctx;
1803         struct qm_sg_entry *sg, *out_sg, *in_sg;
1804         struct rte_mbuf *mbuf;
1805         uint8_t req_segs;
1806         uint32_t in_len = 0, out_len = 0;
1807
1808         if (sym->m_dst)
1809                 mbuf = sym->m_dst;
1810         else
1811                 mbuf = sym->m_src;
1812
1813         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1814         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1815                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1816                                 MAX_SG_ENTRIES);
1817                 return NULL;
1818         }
1819
1820         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1821         if (!ctx)
1822                 return NULL;
1823         cf = &ctx->job;
1824         ctx->op = op;
1825         /* output */
1826         out_sg = &cf->sg[0];
1827         out_sg->extension = 1;
1828         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1829
1830         /* 1st seg */
1831         sg = &cf->sg[2];
1832         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1833         sg->offset = 0;
1834
1835         /* Successive segs */
1836         while (mbuf->next) {
1837                 sg->length = mbuf->data_len;
1838                 out_len += sg->length;
1839                 mbuf = mbuf->next;
1840                 cpu_to_hw_sg(sg);
1841                 sg++;
1842                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1843                 sg->offset = 0;
1844         }
1845         sg->length = mbuf->buf_len - mbuf->data_off;
1846         out_len += sg->length;
1847         sg->final = 1;
1848         cpu_to_hw_sg(sg);
1849
1850         out_sg->length = out_len;
1851         cpu_to_hw_sg(out_sg);
1852
1853         /* input */
1854         mbuf = sym->m_src;
1855         in_sg = &cf->sg[1];
1856         in_sg->extension = 1;
1857         in_sg->final = 1;
1858         in_len = mbuf->data_len;
1859
1860         sg++;
1861         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1862
1863         /* 1st seg */
1864         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1865         sg->length = mbuf->data_len;
1866         sg->offset = 0;
1867
1868         /* Successive segs */
1869         mbuf = mbuf->next;
1870         while (mbuf) {
1871                 cpu_to_hw_sg(sg);
1872                 sg++;
1873                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1874                 sg->length = mbuf->data_len;
1875                 sg->offset = 0;
1876                 in_len += sg->length;
1877                 mbuf = mbuf->next;
1878         }
1879         sg->final = 1;
1880         cpu_to_hw_sg(sg);
1881
1882         in_sg->length = in_len;
1883         cpu_to_hw_sg(in_sg);
1884
1885         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1886
1887         return cf;
1888 }
1889 #endif
1890
1891 static uint16_t
1892 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1893                        uint16_t nb_ops)
1894 {
1895         /* Transmit the frames to the given device and queue pair */
1896         uint32_t loop;
1897         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1898         uint16_t num_tx = 0, nb_ops_in = nb_ops;
1899         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1900         uint32_t frames_to_send;
1901         struct rte_crypto_op *op;
1902         struct dpaa_sec_job *cf;
1903         dpaa_sec_session *ses;
1904         uint16_t auth_hdr_len, auth_tail_len;
1905         uint32_t index, flags[DPAA_SEC_BURST] = {0};
1906         struct qman_fq *inq[DPAA_SEC_BURST];
1907
1908         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1909                 if (rte_dpaa_portal_init((void *)0)) {
1910                         DPAA_SEC_ERR("Failure in affining portal");
1911                         return 0;
1912                 }
1913         }
1914
1915         while (nb_ops) {
1916                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1917                                 DPAA_SEC_BURST : nb_ops;
1918                 for (loop = 0; loop < frames_to_send; loop++) {
1919                         op = *(ops++);
1920                         if (*dpaa_seqn(op->sym->m_src) != 0) {
1921                                 index = *dpaa_seqn(op->sym->m_src) - 1;
1922                                 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1923                                         /* QM_EQCR_DCA_IDXMASK = 0x0f */
1924                                         flags[loop] = ((index & 0x0f) << 8);
1925                                         flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1926                                         DPAA_PER_LCORE_DQRR_SIZE--;
1927                                         DPAA_PER_LCORE_DQRR_HELD &=
1928                                                                 ~(1 << index);
1929                                 }
1930                         }
1931
1932                         switch (op->sess_type) {
1933                         case RTE_CRYPTO_OP_WITH_SESSION:
1934                                 ses = (dpaa_sec_session *)
1935                                         get_sym_session_private_data(
1936                                                 op->sym->session,
1937                                                 dpaa_cryptodev_driver_id);
1938                                 break;
1939 #ifdef RTE_LIB_SECURITY
1940                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1941                                 ses = (dpaa_sec_session *)
1942                                         get_sec_session_private_data(
1943                                                         op->sym->sec_session);
1944                                 break;
1945 #endif
1946                         default:
1947                                 DPAA_SEC_DP_ERR(
1948                                         "sessionless crypto op not supported");
1949                                 frames_to_send = loop;
1950                                 nb_ops = loop;
1951                                 goto send_pkts;
1952                         }
1953
1954                         if (!ses) {
1955                                 DPAA_SEC_DP_ERR("session not available");
1956                                 frames_to_send = loop;
1957                                 nb_ops = loop;
1958                                 goto send_pkts;
1959                         }
1960
1961                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1962                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1963                                         frames_to_send = loop;
1964                                         nb_ops = loop;
1965                                         goto send_pkts;
1966                                 }
1967                         } else if (unlikely(ses->qp[rte_lcore_id() %
1968                                                 MAX_DPAA_CORES] != qp)) {
1969                 DPAA_SEC_DP_ERR("Old sess->qp = %p,"
1970                         " new qp = %p\n",
1971                                         ses->qp[rte_lcore_id() %
1972                                         MAX_DPAA_CORES], qp);
1973                                 frames_to_send = loop;
1974                                 nb_ops = loop;
1975                                 goto send_pkts;
1976                         }
1977
1978                         auth_hdr_len = op->sym->auth.data.length -
1979                                                 op->sym->cipher.data.length;
1980                         auth_tail_len = 0;
1981
1982                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1983                                   ((op->sym->m_dst == NULL) ||
1984                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1985                                 switch (ses->ctxt) {
1986 #ifdef RTE_LIB_SECURITY
1987                                 case DPAA_SEC_PDCP:
1988                                 case DPAA_SEC_IPSEC:
1989                                         cf = build_proto(op, ses);
1990                                         break;
1991 #endif
1992                                 case DPAA_SEC_AUTH:
1993                                         cf = build_auth_only(op, ses);
1994                                         break;
1995                                 case DPAA_SEC_CIPHER:
1996                                         cf = build_cipher_only(op, ses);
1997                                         break;
1998                                 case DPAA_SEC_AEAD:
1999                                         cf = build_cipher_auth_gcm(op, ses);
2000                                         auth_hdr_len = ses->auth_only_len;
2001                                         break;
2002                                 case DPAA_SEC_CIPHER_HASH:
2003                                         auth_hdr_len =
2004                                                 op->sym->cipher.data.offset
2005                                                 - op->sym->auth.data.offset;
2006                                         auth_tail_len =
2007                                                 op->sym->auth.data.length
2008                                                 - op->sym->cipher.data.length
2009                                                 - auth_hdr_len;
2010                                         cf = build_cipher_auth(op, ses);
2011                                         break;
2012                                 default:
2013                                         DPAA_SEC_DP_ERR("operation not supported");
2014                                         frames_to_send = loop;
2015                                         nb_ops = loop;
2016                                         goto send_pkts;
2017                                 }
2018                         } else {
2019                                 switch (ses->ctxt) {
2020 #ifdef RTE_LIB_SECURITY
2021                                 case DPAA_SEC_PDCP:
2022                                 case DPAA_SEC_IPSEC:
2023                                         cf = build_proto_sg(op, ses);
2024                                         break;
2025 #endif
2026                                 case DPAA_SEC_AUTH:
2027                                         cf = build_auth_only_sg(op, ses);
2028                                         break;
2029                                 case DPAA_SEC_CIPHER:
2030                                         cf = build_cipher_only_sg(op, ses);
2031                                         break;
2032                                 case DPAA_SEC_AEAD:
2033                                         cf = build_cipher_auth_gcm_sg(op, ses);
2034                                         auth_hdr_len = ses->auth_only_len;
2035                                         break;
2036                                 case DPAA_SEC_CIPHER_HASH:
2037                                         auth_hdr_len =
2038                                                 op->sym->cipher.data.offset
2039                                                 - op->sym->auth.data.offset;
2040                                         auth_tail_len =
2041                                                 op->sym->auth.data.length
2042                                                 - op->sym->cipher.data.length
2043                                                 - auth_hdr_len;
2044                                         cf = build_cipher_auth_sg(op, ses);
2045                                         break;
2046                                 default:
2047                                         DPAA_SEC_DP_ERR("operation not supported");
2048                                         frames_to_send = loop;
2049                                         nb_ops = loop;
2050                                         goto send_pkts;
2051                                 }
2052                         }
2053                         if (unlikely(!cf)) {
2054                                 frames_to_send = loop;
2055                                 nb_ops = loop;
2056                                 goto send_pkts;
2057                         }
2058
2059                         fd = &fds[loop];
2060                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
2061                         fd->opaque_addr = 0;
2062                         fd->cmd = 0;
2063                         qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
2064                         fd->_format1 = qm_fd_compound;
2065                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
2066
2067                         /* Auth_only_len is set to 0 in the descriptor and is
2068                          * overwritten here in fd.cmd, which updates the
2069                          * DPOVRD register.
2070                          */
2071                         if (auth_hdr_len || auth_tail_len) {
2072                                 fd->cmd = 0x80000000;
2073                                 fd->cmd |=
2074                                         ((auth_tail_len << 16) | auth_hdr_len);
2075                         }
2076
2077 #ifdef RTE_LIB_SECURITY
2078                         /* For PDCP, the per-packet HFN is stored in the
2079                          * mbuf private area, after sym_op.
2080                          */
2081                         if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
2082                                 fd->cmd = 0x80000000 |
2083                                         *((uint32_t *)((uint8_t *)op +
2084                                         ses->pdcp.hfn_ovd_offset));
2085                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
2086                                         *((uint32_t *)((uint8_t *)op +
2087                                         ses->pdcp.hfn_ovd_offset)),
2088                                         ses->pdcp.hfn_ovd);
2089                         }
2090 #endif
2091                 }
2092 send_pkts:
2093                 loop = 0;
2094                 while (loop < frames_to_send) {
2095                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
2096                                         &flags[loop], frames_to_send - loop);
2097                 }
2098                 nb_ops -= frames_to_send;
2099                 num_tx += frames_to_send;
2100         }
2101
2102         dpaa_qp->tx_pkts += num_tx;
2103         dpaa_qp->tx_errs += nb_ops_in - num_tx; /* ops dropped before enqueue */
2104
2105         return num_tx;
2106 }
2107
2108 static uint16_t
2109 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
2110                        uint16_t nb_ops)
2111 {
2112         uint16_t num_rx;
2113         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
2114
2115         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2116                 if (rte_dpaa_portal_init((void *)0)) {
2117                         DPAA_SEC_ERR("Failure in affining portal");
2118                         return 0;
2119                 }
2120         }
2121
2122         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
2123
2124         dpaa_qp->rx_pkts += num_rx;
2125         dpaa_qp->rx_errs += nb_ops - num_rx;
2126
2127         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
2128
2129         return num_rx;
2130 }
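
/*
 * Minimal application-side polling loop (illustrative; dev_id, qp_id
 * and ops[] are assumed to be configured elsewhere) showing how the
 * two burst hooks above are reached through the public API:
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	uint16_t done = 0;
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    &ops[done],
 *						    sent - done);
 */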
2131
2132 /** Release queue pair */
2133 static int
2134 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
2135                             uint16_t qp_id)
2136 {
2137         struct dpaa_sec_dev_private *internals;
2138         struct dpaa_sec_qp *qp = NULL;
2139
2140         PMD_INIT_FUNC_TRACE();
2141
2142         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
2143
2144         internals = dev->data->dev_private;
2145         if (qp_id >= internals->max_nb_queue_pairs) {
2146                 DPAA_SEC_ERR("Invalid qp_id %u, max supported is %u",
2147                              qp_id, internals->max_nb_queue_pairs);
2148                 return -EINVAL;
2149         }
2150
2151         qp = &internals->qps[qp_id];
2152         rte_mempool_free(qp->ctx_pool);
2153         qp->internals = NULL;
2154         dev->data->queue_pairs[qp_id] = NULL;
2155
2156         return 0;
2157 }
2158
2159 /** Setup a queue pair */
2160 static int
2161 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2162                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2163                 __rte_unused int socket_id)
2164 {
2165         struct dpaa_sec_dev_private *internals;
2166         struct dpaa_sec_qp *qp = NULL;
2167         char str[20];
2168
2169         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2170
2171         internals = dev->data->dev_private;
2172         if (qp_id >= internals->max_nb_queue_pairs) {
2173                 DPAA_SEC_ERR("Invalid qp_id %u, max supported is %u",
2174                              qp_id, internals->max_nb_queue_pairs);
2175                 return -EINVAL;
2176         }
2177
2178         qp = &internals->qps[qp_id];
2179         qp->internals = internals;
2180         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2181                         dev->data->dev_id, qp_id);
2182         if (!qp->ctx_pool) {
2183                 qp->ctx_pool = rte_mempool_create((const char *)str,
2184                                                         CTX_POOL_NUM_BUFS,
2185                                                         CTX_POOL_BUF_SIZE,
2186                                                         CTX_POOL_CACHE_SIZE, 0,
2187                                                         NULL, NULL, NULL, NULL,
2188                                                         SOCKET_ID_ANY, 0);
2189                 if (!qp->ctx_pool) {
2190                         DPAA_SEC_ERR("%s create failed", str);
2191                         return -ENOMEM;
2192                 }
2193         } else
2194                 DPAA_SEC_INFO("mempool already created for dev_id: %d, qp: %d",
2195                                 dev->data->dev_id, qp_id);
2196         dev->data->queue_pairs[qp_id] = qp;
2197
2198         return 0;
2199 }
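
/*
 * Illustrative application-side setup (assumed values): qp_conf and
 * socket_id are ignored by this PMD, so a minimal
 *
 *	struct rte_cryptodev_qp_conf conf = { .nb_descriptors = 512 };
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &conf, SOCKET_ID_ANY);
 *
 * is enough; the per-qp ctx_pool above is created on first setup and
 * reused on later calls.
 */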
2200
2201 /** Returns the size of session structure */
2202 static unsigned int
2203 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2204 {
2205         PMD_INIT_FUNC_TRACE();
2206
2207         return sizeof(dpaa_sec_session);
2208 }
2209
2210 static int
2211 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2212                      struct rte_crypto_sym_xform *xform,
2213                      dpaa_sec_session *session)
2214 {
2215         session->ctxt = DPAA_SEC_CIPHER;
2216         session->cipher_alg = xform->cipher.algo;
2217         session->iv.length = xform->cipher.iv.length;
2218         session->iv.offset = xform->cipher.iv.offset;
2219         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2220                                                RTE_CACHE_LINE_SIZE);
2221         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2222                 DPAA_SEC_ERR("No Memory for cipher key");
2223                 return -ENOMEM;
2224         }
2225         session->cipher_key.length = xform->cipher.key.length;
2226
2227         memcpy(session->cipher_key.data, xform->cipher.key.data,
2228                xform->cipher.key.length);
2229         switch (xform->cipher.algo) {
2230         case RTE_CRYPTO_CIPHER_AES_CBC:
2231                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2232                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2233                 break;
2234         case RTE_CRYPTO_CIPHER_DES_CBC:
2235                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2236                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2237                 break;
2238         case RTE_CRYPTO_CIPHER_3DES_CBC:
2239                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2240                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2241                 break;
2242         case RTE_CRYPTO_CIPHER_AES_CTR:
2243                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2244                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2245                 break;
2246         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2247                 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2248                 break;
2249         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2250                 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2251                 break;
2252         default:
2253                 DPAA_SEC_ERR("Crypto: Unsupported Cipher specified %u",
2254                               xform->cipher.algo);
2255                 return -ENOTSUP;
2256         }
2257         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2258                         DIR_ENC : DIR_DEC;
2259
2260         return 0;
2261 }
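
/*
 * Example cipher-only xform consumed by dpaa_sec_cipher_init() above
 * (illustrative; "key" and IV_OFFSET are application placeholders,
 * IV_OFFSET typically being sizeof(struct rte_crypto_op) +
 * sizeof(struct rte_crypto_sym_op) so the IV lands in the op private
 * area):
 *
 *	struct rte_crypto_sym_xform x = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */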
2262
2263 static int
2264 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2265                    struct rte_crypto_sym_xform *xform,
2266                    dpaa_sec_session *session)
2267 {
2268         session->ctxt = DPAA_SEC_AUTH;
2269         session->auth_alg = xform->auth.algo;
2270         session->auth_key.length = xform->auth.key.length;
2271         if (xform->auth.key.length) {
2272                 session->auth_key.data =
2273                                 rte_zmalloc(NULL, xform->auth.key.length,
2274                                              RTE_CACHE_LINE_SIZE);
2275                 if (session->auth_key.data == NULL) {
2276                         DPAA_SEC_ERR("No Memory for auth key");
2277                         return -ENOMEM;
2278                 }
2279                 memcpy(session->auth_key.data, xform->auth.key.data,
2280                                 xform->auth.key.length);
2281
2282         }
2283         session->digest_length = xform->auth.digest_length;
2284         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2285                 session->iv.offset = xform->auth.iv.offset;
2286                 session->iv.length = xform->auth.iv.length;
2287         }
2288
2289         switch (xform->auth.algo) {
2290         case RTE_CRYPTO_AUTH_SHA1:
2291                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2292                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2293                 break;
2294         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2295                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2296                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2297                 break;
2298         case RTE_CRYPTO_AUTH_MD5:
2299                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2300                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2301                 break;
2302         case RTE_CRYPTO_AUTH_MD5_HMAC:
2303                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2304                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2305                 break;
2306         case RTE_CRYPTO_AUTH_SHA224:
2307                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2308                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2309                 break;
2310         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2311                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2312                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2313                 break;
2314         case RTE_CRYPTO_AUTH_SHA256:
2315                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2316                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2317                 break;
2318         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2319                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2320                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2321                 break;
2322         case RTE_CRYPTO_AUTH_SHA384:
2323                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2324                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2325                 break;
2326         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2327                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2328                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2329                 break;
2330         case RTE_CRYPTO_AUTH_SHA512:
2331                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2332                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2333                 break;
2334         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2335                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2336                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2337                 break;
2338         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2339                 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2340                 session->auth_key.algmode = OP_ALG_AAI_F9;
2341                 break;
2342         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2343                 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2344                 session->auth_key.algmode = OP_ALG_AAI_F9;
2345                 break;
2346         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2347                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2348                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2349                 break;
2350         case RTE_CRYPTO_AUTH_AES_CMAC:
2351                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2352                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2353                 break;
2354         default:
2355                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2356                               xform->auth.algo);
2357                 return -ENOTSUP;
2358         }
2359
2360         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2361                         DIR_ENC : DIR_DEC;
2362
2363         return 0;
2364 }
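
/*
 * Example auth-only xform for dpaa_sec_auth_init() above
 * (illustrative placeholders; the iv fields are only consulted for
 * the SNOW 3G/ZUC algorithms):
 *
 *	struct rte_crypto_sym_xform x = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = key, .length = 20 },
 *			.digest_length = 20,
 *		},
 *	};
 */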
2365
2366 static int
2367 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2368                    struct rte_crypto_sym_xform *xform,
2369                    dpaa_sec_session *session)
2370 {
2372         struct rte_crypto_cipher_xform *cipher_xform;
2373         struct rte_crypto_auth_xform *auth_xform;
2374
2375         session->ctxt = DPAA_SEC_CIPHER_HASH;
2376         if (session->auth_cipher_text) {
2377                 cipher_xform = &xform->cipher;
2378                 auth_xform = &xform->next->auth;
2379         } else {
2380                 cipher_xform = &xform->next->cipher;
2381                 auth_xform = &xform->auth;
2382         }
2383
2384         /* Set IV parameters */
2385         session->iv.offset = cipher_xform->iv.offset;
2386         session->iv.length = cipher_xform->iv.length;
2387
2388         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2389                                                RTE_CACHE_LINE_SIZE);
2390         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2391                 DPAA_SEC_ERR("No Memory for cipher key");
2392                 return -ENOMEM;
2393         }
2394         session->cipher_key.length = cipher_xform->key.length;
2395         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2396                                              RTE_CACHE_LINE_SIZE);
2397         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2398                 DPAA_SEC_ERR("No Memory for auth key");
2399                 return -ENOMEM;
2400         }
2401         session->auth_key.length = auth_xform->key.length;
2402         memcpy(session->cipher_key.data, cipher_xform->key.data,
2403                cipher_xform->key.length);
2404         memcpy(session->auth_key.data, auth_xform->key.data,
2405                auth_xform->key.length);
2406
2407         session->digest_length = auth_xform->digest_length;
2408         session->auth_alg = auth_xform->algo;
2409
2410         switch (auth_xform->algo) {
2411         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2412                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2413                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2414                 break;
2415         case RTE_CRYPTO_AUTH_MD5_HMAC:
2416                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2417                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2418                 break;
2419         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2420                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2421                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2422                 break;
2423         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2424                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2425                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2426                 break;
2427         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2428                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2429                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2430                 break;
2431         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2432                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2433                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2434                 break;
2435         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2436                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2437                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2438                 break;
2439         case RTE_CRYPTO_AUTH_AES_CMAC:
2440                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2441                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2442                 break;
2443         default:
2444                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2445                               auth_xform->algo);
2446                 return -ENOTSUP;
2447         }
2448
2449         session->cipher_alg = cipher_xform->algo;
2450
2451         switch (cipher_xform->algo) {
2452         case RTE_CRYPTO_CIPHER_AES_CBC:
2453                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2454                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2455                 break;
2456         case RTE_CRYPTO_CIPHER_DES_CBC:
2457                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2458                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2459                 break;
2460         case RTE_CRYPTO_CIPHER_3DES_CBC:
2461                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2462                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2463                 break;
2464         case RTE_CRYPTO_CIPHER_AES_CTR:
2465                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2466                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2467                 break;
2468         default:
2469                 DPAA_SEC_ERR("Crypto: Unsupported Cipher specified %u",
2470                               cipher_xform->algo);
2471                 return -ENOTSUP;
2472         }
2473         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2474                                 DIR_ENC : DIR_DEC;
2475         return 0;
2476 }
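
/*
 * Note: dpaa_sec_chain_init() above relies on
 * session->auth_cipher_text being set beforehand by
 * dpaa_sec_set_session_parameters(): cipher-then-auth (encrypt
 * direction) sets it, auth-then-cipher (decrypt direction) clears
 * it, which is how the cipher and auth halves of the two-element
 * xform chain are located.
 */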
2477
2478 static int
2479 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2480                    struct rte_crypto_sym_xform *xform,
2481                    dpaa_sec_session *session)
2482 {
2483         session->aead_alg = xform->aead.algo;
2484         session->ctxt = DPAA_SEC_AEAD;
2485         session->iv.length = xform->aead.iv.length;
2486         session->iv.offset = xform->aead.iv.offset;
2487         session->auth_only_len = xform->aead.aad_length;
2488         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2489                                              RTE_CACHE_LINE_SIZE);
2490         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2491                 DPAA_SEC_ERR("No Memory for aead key");
2492                 return -ENOMEM;
2493         }
2494         session->aead_key.length = xform->aead.key.length;
2495         session->digest_length = xform->aead.digest_length;
2496
2497         memcpy(session->aead_key.data, xform->aead.key.data,
2498                xform->aead.key.length);
2499
2500         switch (session->aead_alg) {
2501         case RTE_CRYPTO_AEAD_AES_GCM:
2502                 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2503                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2504                 break;
2505         default:
2506                 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2507                 return -ENOTSUP;
2508         }
2509
2510         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2511                         DIR_ENC : DIR_DEC;
2512
2513         return 0;
2514 }
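
/*
 * Example AEAD xform for dpaa_sec_aead_init() above (illustrative
 * placeholders; IV_OFFSET is the application-chosen offset of the IV
 * in the op private area):
 *
 *	struct rte_crypto_sym_xform x = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */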
2515
2516 static struct qman_fq *
2517 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2518 {
2519         unsigned int i;
2520
2521         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2522                 if (qi->inq_attach[i] == 0) {
2523                         qi->inq_attach[i] = 1;
2524                         return &qi->inq[i];
2525                 }
2526         }
2527         DPAA_SEC_WARN("All sessions in use, max %u", qi->max_nb_sessions);
2528
2529         return NULL;
2530 }
2531
2532 static int
2533 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2534 {
2535         unsigned int i;
2536
2537         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2538                 if (&qi->inq[i] == fq) {
2539                         if (qman_retire_fq(fq, NULL) != 0)
2540                                 DPAA_SEC_DEBUG("Queue is not retired\n");
2541                         qman_oos_fq(fq);
2542                         qi->inq_attach[i] = 0;
2543                         return 0;
2544                 }
2545         }
2546         return -1;
2547 }
2548
2549 int
2550 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2551 {
2552         int ret;
2553
2554         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2555         ret = dpaa_sec_prep_cdb(sess);
2556         if (ret) {
2557                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2558                 return ret;
2559         }
2560         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2561                 ret = rte_dpaa_portal_init((void *)0);
2562                 if (ret) {
2563                         DPAA_SEC_ERR("Failure in affining portal");
2564                         return ret;
2565                 }
2566         }
2567         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2568                                rte_dpaa_mem_vtop(&sess->cdb),
2569                                qman_fq_fqid(&qp->outq));
2570         if (ret)
2571                 DPAA_SEC_ERR("Unable to init sec queue");
2572
2573         return ret;
2574 }
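
/*
 * Each session keeps one inq/qp slot per lcore (MAX_DPAA_CORES); the
 * rte_lcore_id() % MAX_DPAA_CORES indexing here and in the enqueue
 * path gives every core its own queue binding for the session.
 */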
2575
2576 static inline void
2577 free_session_data(dpaa_sec_session *s)
2578 {
2579         if (is_aead(s))
2580                 rte_free(s->aead_key.data);
2581         else {
2582                 rte_free(s->auth_key.data);
2583                 rte_free(s->cipher_key.data);
2584         }
2585         memset(s, 0, sizeof(dpaa_sec_session));
2586 }
2587
2588 static int
2589 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2590                             struct rte_crypto_sym_xform *xform, void *sess)
2591 {
2592         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2593         dpaa_sec_session *session = sess;
2594         uint32_t i;
2595         int ret;
2596
2597         PMD_INIT_FUNC_TRACE();
2598
2599         if (unlikely(sess == NULL)) {
2600                 DPAA_SEC_ERR("invalid session struct");
2601                 return -EINVAL;
2602         }
2603         memset(session, 0, sizeof(dpaa_sec_session));
2604
2605         /* Default IV length = 0 */
2606         session->iv.length = 0;
2607
2608         /* Cipher Only */
2609         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2610                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2611                 ret = dpaa_sec_cipher_init(dev, xform, session);
2612
2613         /* Authentication Only */
2614         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2615                    xform->next == NULL) {
2616                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2617                 session->ctxt = DPAA_SEC_AUTH;
2618                 ret = dpaa_sec_auth_init(dev, xform, session);
2619
2620         /* Cipher then Authenticate */
2621         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2622                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2623                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2624                         session->auth_cipher_text = 1;
2625                         if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2626                                 ret = dpaa_sec_auth_init(dev, xform, session);
2627                         else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2628                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2629                         else
2630                                 ret = dpaa_sec_chain_init(dev, xform, session);
2631                 } else {
2632                         DPAA_SEC_ERR("Not supported: Cipher (decrypt) then Auth");
2633                         return -ENOTSUP;
2634                 }
2635         /* Authenticate then Cipher */
2636         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2637                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2638                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2639                         session->auth_cipher_text = 0;
2640                         if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2641                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2642                         else if (xform->next->cipher.algo
2643                                         == RTE_CRYPTO_CIPHER_NULL)
2644                                 ret = dpaa_sec_auth_init(dev, xform, session);
2645                         else
2646                                 ret = dpaa_sec_chain_init(dev, xform, session);
2647                 } else {
2648                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2649                         return -ENOTSUP;
2650                 }
2651
2652         /* AEAD operation for AES-GCM kind of Algorithms */
2653         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2654                    xform->next == NULL) {
2655                 ret = dpaa_sec_aead_init(dev, xform, session);
2656
2657         } else {
2658                 DPAA_SEC_ERR("Invalid crypto type");
2659                 return -EINVAL;
2660         }
2661         if (ret) {
2662                 DPAA_SEC_ERR("unable to init session");
2663                 goto err1;
2664         }
2665
2666         rte_spinlock_lock(&internals->lock);
2667         for (i = 0; i < MAX_DPAA_CORES; i++) {
2668                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2669                 if (session->inq[i] == NULL) {
2670                         DPAA_SEC_ERR("unable to attach sec queue");
2671                         rte_spinlock_unlock(&internals->lock);
2672                         ret = -EBUSY;
2673                         goto err1;
2674                 }
2675         }
2676         rte_spinlock_unlock(&internals->lock);
2677
2678         return 0;
2679
2680 err1:
2681         free_session_data(session);
2682         return ret;
2683 }
2684
2685 static int
2686 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2687                 struct rte_crypto_sym_xform *xform,
2688                 struct rte_cryptodev_sym_session *sess,
2689                 struct rte_mempool *mempool)
2690 {
2691         void *sess_private_data;
2692         int ret;
2693
2694         PMD_INIT_FUNC_TRACE();
2695
2696         if (rte_mempool_get(mempool, &sess_private_data)) {
2697                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2698                 return -ENOMEM;
2699         }
2700
2701         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2702         if (ret != 0) {
2703                 DPAA_SEC_ERR("failed to configure session parameters");
2704
2705                 /* Return session to mempool */
2706                 rte_mempool_put(mempool, sess_private_data);
2707                 return ret;
2708         }
2709
2710         set_sym_session_private_data(sess, dev->driver_id,
2711                         sess_private_data);
2712
2714         return 0;
2715 }
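
/*
 * Application-side sketch for this (pre-22.11 style) session API;
 * the two mempools are assumed to be created by the caller:
 *
 *	struct rte_cryptodev_sym_session *s =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, s, &xform, sess_priv_mp);
 */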
2716
2717 static inline void
2718 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2719 {
2720         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2721         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2722         uint8_t i;
2723
2724         for (i = 0; i < MAX_DPAA_CORES; i++) {
2725                 if (s->inq[i])
2726                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2727                 s->inq[i] = NULL;
2728                 s->qp[i] = NULL;
2729         }
2730         free_session_data(s);
2731         rte_mempool_put(sess_mp, (void *)s);
2732 }
2733
2734 /** Clear the session memory so it doesn't leave key material behind */
2735 static void
2736 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2737                 struct rte_cryptodev_sym_session *sess)
2738 {
2739         PMD_INIT_FUNC_TRACE();
2740         uint8_t index = dev->driver_id;
2741         void *sess_priv = get_sym_session_private_data(sess, index);
2742         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2743
2744         if (sess_priv) {
2745                 free_session_memory(dev, s);
2746                 set_sym_session_private_data(sess, index, NULL);
2747         }
2748 }
2749
2750 #ifdef RTE_LIB_SECURITY
2751 static int
2752 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2753                         struct rte_security_ipsec_xform *ipsec_xform,
2754                         dpaa_sec_session *session)
2755 {
2756         PMD_INIT_FUNC_TRACE();
2757
2758         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2759                                                RTE_CACHE_LINE_SIZE);
2760         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2761                 DPAA_SEC_ERR("No Memory for aead key");
2762                 return -ENOMEM;
2763         }
2764         memcpy(session->aead_key.data, aead_xform->key.data,
2765                aead_xform->key.length);
2766
2767         session->digest_length = aead_xform->digest_length;
2768         session->aead_key.length = aead_xform->key.length;
2769
2770         switch (aead_xform->algo) {
2771         case RTE_CRYPTO_AEAD_AES_GCM:
2772                 switch (session->digest_length) {
2773                 case 8:
2774                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2775                         break;
2776                 case 12:
2777                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2778                         break;
2779                 case 16:
2780                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2781                         break;
2782                 default:
2783                         DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2784                                      session->digest_length);
2785                         return -EINVAL;
2786                 }
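                /* RFC 4106: the SEC engine builds the 12-byte GCM nonce by
                 * prepending this 4-byte SA salt (kept in the PDB) to the
                 * 8-byte per-packet IV.
                 */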
2787                 if (session->dir == DIR_ENC) {
2788                         memcpy(session->encap_pdb.gcm.salt,
2789                                 (uint8_t *)&(ipsec_xform->salt), 4);
2790                 } else {
2791                         memcpy(session->decap_pdb.gcm.salt,
2792                                 (uint8_t *)&(ipsec_xform->salt), 4);
2793                 }
2794                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2795                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2796                 break;
2797         default:
2798                 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2799                               aead_xform->algo);
2800                 return -ENOTSUP;
2801         }
2802         return 0;
2803 }
2804
2805 static int
2806 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2807         struct rte_crypto_auth_xform *auth_xform,
2808         struct rte_security_ipsec_xform *ipsec_xform,
2809         dpaa_sec_session *session)
2810 {
2811         if (cipher_xform) {
2812                 session->cipher_key.data = rte_zmalloc(NULL,
2813                                                        cipher_xform->key.length,
2814                                                        RTE_CACHE_LINE_SIZE);
2815                 if (session->cipher_key.data == NULL &&
2816                                 cipher_xform->key.length > 0) {
2817                         DPAA_SEC_ERR("No Memory for cipher key");
2818                         return -ENOMEM;
2819                 }
2820
2821                 session->cipher_key.length = cipher_xform->key.length;
2822                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2823                                 cipher_xform->key.length);
2824                 session->cipher_alg = cipher_xform->algo;
2825         } else {
2826                 session->cipher_key.data = NULL;
2827                 session->cipher_key.length = 0;
2828                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2829         }
2830
2831         if (auth_xform) {
2832                 session->auth_key.data = rte_zmalloc(NULL,
2833                                                 auth_xform->key.length,
2834                                                 RTE_CACHE_LINE_SIZE);
2835                 if (session->auth_key.data == NULL &&
2836                                 auth_xform->key.length > 0) {
2837                         DPAA_SEC_ERR("No Memory for auth key");
2838                         return -ENOMEM;
2839                 }
2840                 session->auth_key.length = auth_xform->key.length;
2841                 memcpy(session->auth_key.data, auth_xform->key.data,
2842                                 auth_xform->key.length);
2843                 session->auth_alg = auth_xform->algo;
2844                 session->digest_length = auth_xform->digest_length;
2845         } else {
2846                 session->auth_key.data = NULL;
2847                 session->auth_key.length = 0;
2848                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2849         }
2850
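        /* Map to the RTA IPsec protocol algorithm selectors; the
         * _96/_128/_192/_256 suffixes are the truncated ICV bit lengths
         * used by IPsec (RFC 2403/2404/4868).
         */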
2851         switch (session->auth_alg) {
2852         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2853                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2854                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2855                 break;
2856         case RTE_CRYPTO_AUTH_MD5_HMAC:
2857                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2858                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2859                 break;
2860         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2861                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2862                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2863                 if (session->digest_length != 16)
2864                         DPAA_SEC_WARN(
2865                         "Using a non-standard truncated digest length with"
2866                         " sha256-hmac; it will not work with lookaside protocol offload");
2867                 break;
2868         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2869                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2870                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2871                 break;
2872         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2873                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2874                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2875                 break;
2876         case RTE_CRYPTO_AUTH_AES_CMAC:
2877                 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2878                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2879                 break;
2880         case RTE_CRYPTO_AUTH_NULL:
2881                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2882                 break;
2883         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2884                 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2885                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2886                 break;
2887         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2888         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2889         case RTE_CRYPTO_AUTH_SHA1:
2890         case RTE_CRYPTO_AUTH_SHA256:
2891         case RTE_CRYPTO_AUTH_SHA512:
2892         case RTE_CRYPTO_AUTH_SHA224:
2893         case RTE_CRYPTO_AUTH_SHA384:
2894         case RTE_CRYPTO_AUTH_MD5:
2895         case RTE_CRYPTO_AUTH_AES_GMAC:
2896         case RTE_CRYPTO_AUTH_KASUMI_F9:
2897         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2898         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2899                 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2900                               session->auth_alg);
2901                 return -ENOTSUP;
2902         default:
2903                 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2904                               session->auth_alg);
2905                 return -ENOTSUP;
2906         }
2907
2908         switch (session->cipher_alg) {
2909         case RTE_CRYPTO_CIPHER_AES_CBC:
2910                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2911                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2912                 break;
2913         case RTE_CRYPTO_CIPHER_DES_CBC:
2914                 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2915                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2916                 break;
2917         case RTE_CRYPTO_CIPHER_3DES_CBC:
2918                 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2919                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2920                 break;
2921         case RTE_CRYPTO_CIPHER_AES_CTR:
2922                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2923                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
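                /* RFC 3686 counter block = nonce (salt) || per-packet IV ||
                 * 32-bit block counter, with the counter starting at 1.
                 */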
2924                 if (session->dir == DIR_ENC) {
2925                         session->encap_pdb.ctr.ctr_initial = 0x00000001;
2926                         session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2927                 } else {
2928                         session->decap_pdb.ctr.ctr_initial = 0x00000001;
2929                         session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2930                 }
2931                 break;
2932         case RTE_CRYPTO_CIPHER_NULL:
2933                 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2934                 break;
2935         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2936         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2937         case RTE_CRYPTO_CIPHER_3DES_ECB:
2938         case RTE_CRYPTO_CIPHER_AES_ECB:
2939         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2940                 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2941                               session->cipher_alg);
2942                 return -ENOTSUP;
2943         default:
2944                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2945                               session->cipher_alg);
2946                 return -ENOTSUP;
2947         }
2948
2949         return 0;
2950 }
2951
2952 static int
2953 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2954                            struct rte_security_session_conf *conf,
2955                            void *sess)
2956 {
2957         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2958         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2959         struct rte_crypto_auth_xform *auth_xform = NULL;
2960         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2961         struct rte_crypto_aead_xform *aead_xform = NULL;
2962         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2963         uint32_t i;
2964         int ret;
2965
2966         PMD_INIT_FUNC_TRACE();
2967
2968         memset(session, 0, sizeof(dpaa_sec_session));
2969         session->proto_alg = conf->protocol;
2970         session->ctxt = DPAA_SEC_IPSEC;
2971
2972         if (ipsec_xform->life.bytes_hard_limit != 0 ||
2973             ipsec_xform->life.bytes_soft_limit != 0 ||
2974             ipsec_xform->life.packets_hard_limit != 0 ||
2975             ipsec_xform->life.packets_soft_limit != 0)
2976                 return -ENOTSUP;
2977
2978         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2979                 session->dir = DIR_ENC;
2980         else
2981                 session->dir = DIR_DEC;
2982
2983         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2984                 cipher_xform = &conf->crypto_xform->cipher;
2985                 if (conf->crypto_xform->next)
2986                         auth_xform = &conf->crypto_xform->next->auth;
2987                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2988                                         ipsec_xform, session);
2989         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2990                 auth_xform = &conf->crypto_xform->auth;
2991                 if (conf->crypto_xform->next)
2992                         cipher_xform = &conf->crypto_xform->next->cipher;
2993                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2994                                         ipsec_xform, session);
2995         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2996                 aead_xform = &conf->crypto_xform->aead;
2997                 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2998                                         ipsec_xform, session);
2999         } else {
3000                 DPAA_SEC_ERR("XFORM not specified");
3001                 ret = -EINVAL;
3002                 goto out;
3003         }
3004         if (ret) {
3005                 DPAA_SEC_ERR("Failed to process xform");
3006                 goto out;
3007         }
3008
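        /* For egress, prebuild the outer tunnel IP header in the session;
         * PDBOPTS_ESP_OIHI_PDB_INL (set below) makes the SEC engine copy
         * it from the PDB into every encapsulated packet.
         */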
3009         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3010                 if (ipsec_xform->tunnel.type ==
3011                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3012                         session->ip4_hdr.ip_v = IPVERSION;
3013                         session->ip4_hdr.ip_hl = 5;
3014                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
3015                                                 sizeof(session->ip4_hdr));
3016                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3017                         session->ip4_hdr.ip_id = 0;
3018                         session->ip4_hdr.ip_off = 0;
3019                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3020                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
3021                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3022                                         IPPROTO_ESP : IPPROTO_AH;
3023                         session->ip4_hdr.ip_sum = 0;
3024                         session->ip4_hdr.ip_src =
3025                                         ipsec_xform->tunnel.ipv4.src_ip;
3026                         session->ip4_hdr.ip_dst =
3027                                         ipsec_xform->tunnel.ipv4.dst_ip;
3028                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
3029                                                 (void *)&session->ip4_hdr,
3030                                                 sizeof(struct ip));
3031                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
3032                 } else if (ipsec_xform->tunnel.type ==
3033                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3034                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3035                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
3036                                 ((ipsec_xform->tunnel.ipv6.dscp <<
3037                                         RTE_IPV6_HDR_TC_SHIFT) &
3038                                         RTE_IPV6_HDR_TC_MASK) |
3039                                 ((ipsec_xform->tunnel.ipv6.flabel <<
3040                                         RTE_IPV6_HDR_FL_SHIFT) &
3041                                         RTE_IPV6_HDR_FL_MASK));
3042                         /* Payload length will be updated by HW */
3043                         session->ip6_hdr.payload_len = 0;
3044                         session->ip6_hdr.hop_limits =
3045                                         ipsec_xform->tunnel.ipv6.hlimit;
3046                         session->ip6_hdr.proto = (ipsec_xform->proto ==
3047                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3048                                         IPPROTO_ESP : IPPROTO_AH;
3049                         memcpy(&session->ip6_hdr.src_addr,
3050                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
3051                         memcpy(&session->ip6_hdr.dst_addr,
3052                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
3053                         session->encap_pdb.ip_hdr_len =
3054                                                 sizeof(struct rte_ipv6_hdr);
3055                 }
3056
3057                 session->encap_pdb.options =
3058                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3059                         PDBOPTS_ESP_OIHI_PDB_INL |
3060                         PDBOPTS_ESP_IVSRC |
3061                         PDBHMO_ESP_SNR;
3062                 if (ipsec_xform->options.dec_ttl)
3063                         session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3064                 if (ipsec_xform->options.esn)
3065                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
3066                 session->encap_pdb.spi = ipsec_xform->spi;
3067
3068         } else if (ipsec_xform->direction ==
3069                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3070                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
3071                         session->decap_pdb.options = sizeof(struct ip) << 16;
3072                 else
3073                         session->decap_pdb.options =
3074                                         sizeof(struct rte_ipv6_hdr) << 16;
3075                 if (ipsec_xform->options.esn)
3076                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
3077                 if (ipsec_xform->replay_win_sz) {
3078                         uint32_t win_sz;
3079                         win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3080
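                        /* SEC supports 32-, 64- and 128-entry anti-replay
                         * windows; round the requested size up to the
                         * nearest supported one.
                         */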
3081                         switch (win_sz) {
3082                         case 1:
3083                         case 2:
3084                         case 4:
3085                         case 8:
3086                         case 16:
3087                         case 32:
3088                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
3089                                 break;
3090                         case 64:
3091                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
3092                                 break;
3093                         default:
3094                                 session->decap_pdb.options |=
3095                                                         PDBOPTS_ESP_ARS128;
3096                         }
3097                 }
3098         } else {
3099                 ret = -EINVAL;
                goto out;
        }
3100         rte_spinlock_lock(&internals->lock);
3101         for (i = 0; i < MAX_DPAA_CORES; i++) {
3102                 session->inq[i] = dpaa_sec_attach_rxq(internals);
3103                 if (session->inq[i] == NULL) {
3104                         DPAA_SEC_ERR("unable to attach sec queue");
3105                         rte_spinlock_unlock(&internals->lock);
                        ret = -EBUSY;
3106                         goto out;
3107                 }
3108         }
3109         rte_spinlock_unlock(&internals->lock);
3110
3111         return 0;
3112 out:
3113         free_session_data(session);
3114         return ret;
3115 }
3116
3117 static int
3118 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
3119                           struct rte_security_session_conf *conf,
3120                           void *sess)
3121 {
3122         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3123         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3124         struct rte_crypto_auth_xform *auth_xform = NULL;
3125         struct rte_crypto_cipher_xform *cipher_xform = NULL;
3126         dpaa_sec_session *session = (dpaa_sec_session *)sess;
3127         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
3128         uint32_t i;
3129         int ret;
3130
3131         PMD_INIT_FUNC_TRACE();
3132
3133         memset(session, 0, sizeof(dpaa_sec_session));
3134
3135         /* find xfrm types */
3136         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3137                 cipher_xform = &xform->cipher;
3138                 if (xform->next != NULL &&
3139                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
3140                         auth_xform = &xform->next->auth;
3141         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3142                 auth_xform = &xform->auth;
3143                 if (xform->next != NULL &&
3144                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
3145                         cipher_xform = &xform->next->cipher;
3146         } else {
3147                 DPAA_SEC_ERR("Invalid crypto type");
3148                 return -EINVAL;
3149         }
3150
3151         session->proto_alg = conf->protocol;
3152         session->ctxt = DPAA_SEC_PDCP;
3153
3154         if (cipher_xform) {
3155                 switch (cipher_xform->algo) {
3156                 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3157                         session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3158                         break;
3159                 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3160                         session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3161                         break;
3162                 case RTE_CRYPTO_CIPHER_AES_CTR:
3163                         session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3164                         break;
3165                 case RTE_CRYPTO_CIPHER_NULL:
3166                         session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3167                         break;
3168                 default:
3169                         DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3170                                       cipher_xform->algo);
3171                         return -EINVAL;
3172                 }
3173
3174                 session->cipher_key.data = rte_zmalloc(NULL,
3175                                                cipher_xform->key.length,
3176                                                RTE_CACHE_LINE_SIZE);
3177                 if (session->cipher_key.data == NULL &&
3178                                 cipher_xform->key.length > 0) {
3179                         DPAA_SEC_ERR("No Memory for cipher key");
3180                         return -ENOMEM;
3181                 }
3182                 session->cipher_key.length = cipher_xform->key.length;
3183                 memcpy(session->cipher_key.data, cipher_xform->key.data,
3184                         cipher_xform->key.length);
3185                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3186                                         DIR_ENC : DIR_DEC;
3187                 session->cipher_alg = cipher_xform->algo;
3188         } else {
3189                 session->cipher_key.data = NULL;
3190                 session->cipher_key.length = 0;
3191                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3192                 session->dir = DIR_ENC;
3193         }
3194
3195         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3196                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3197                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3198                         DPAA_SEC_ERR(
3199                                 "PDCP sequence number size must be 5 or 12 bits in control mode");
3200                         ret = -EINVAL;
3201                         goto out;
3202                 }
3203         }
3204
3205         if (auth_xform) {
3206                 switch (auth_xform->algo) {
3207                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3208                         session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3209                         break;
3210                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3211                         session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3212                         break;
3213                 case RTE_CRYPTO_AUTH_AES_CMAC:
3214                         session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3215                         break;
3216                 case RTE_CRYPTO_AUTH_NULL:
3217                         session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3218                         break;
3219                 default:
3220                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3221                                       auth_xform->algo);
3222                         rte_free(session->cipher_key.data);
3223                         return -EINVAL;
3224                 }
3225                 session->auth_key.data = rte_zmalloc(NULL,
3226                                                      auth_xform->key.length,
3227                                                      RTE_CACHE_LINE_SIZE);
3228                 if (!session->auth_key.data &&
3229                     auth_xform->key.length > 0) {
3230                         DPAA_SEC_ERR("No Memory for auth key");
3231                         rte_free(session->cipher_key.data);
3232                         return -ENOMEM;
3233                 }
3234                 session->auth_key.length = auth_xform->key.length;
3235                 memcpy(session->auth_key.data, auth_xform->key.data,
3236                        auth_xform->key.length);
3237                 session->auth_alg = auth_xform->algo;
3238         } else {
3239                 session->auth_key.data = NULL;
3240                 session->auth_key.length = 0;
3241                 session->auth_alg = 0;
3242         }
3243         session->pdcp.domain = pdcp_xform->domain;
3244         session->pdcp.bearer = pdcp_xform->bearer;
3245         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3246         session->pdcp.sn_size = pdcp_xform->sn_size;
3247         session->pdcp.hfn = pdcp_xform->hfn;
3248         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3249         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3250         session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
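        /* Record the cipher IV offset: when HFN override is enabled, the
         * per-packet HFN is read from the crypto op at this offset.
         */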
3251         if (cipher_xform)
3252                 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3253
3254         rte_spinlock_lock(&dev_priv->lock);
3255         for (i = 0; i < MAX_DPAA_CORES; i++) {
3256                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3257                 if (session->inq[i] == NULL) {
3258                         DPAA_SEC_ERR("unable to attach sec queue");
3259                         rte_spinlock_unlock(&dev_priv->lock);
3260                         ret = -EBUSY;
3261                         goto out;
3262                 }
3263         }
3264         rte_spinlock_unlock(&dev_priv->lock);
3265         return 0;
3266 out:
3267         rte_free(session->auth_key.data);
3268         rte_free(session->cipher_key.data);
3269         memset(session, 0, sizeof(dpaa_sec_session));
3270         return ret;
3271 }
3272
3273 static int
3274 dpaa_sec_security_session_create(void *dev,
3275                                  struct rte_security_session_conf *conf,
3276                                  struct rte_security_session *sess,
3277                                  struct rte_mempool *mempool)
3278 {
3279         void *sess_private_data;
3280         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3281         int ret;
3282
3283         if (rte_mempool_get(mempool, &sess_private_data)) {
3284                 DPAA_SEC_ERR("Couldn't get object from session mempool");
3285                 return -ENOMEM;
3286         }
3287
3288         switch (conf->protocol) {
3289         case RTE_SECURITY_PROTOCOL_IPSEC:
3290                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3291                                 sess_private_data);
3292                 break;
3293         case RTE_SECURITY_PROTOCOL_PDCP:
3294                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3295                                 sess_private_data);
3296                 break;
3297         case RTE_SECURITY_PROTOCOL_MACSEC:
3298                 ret = -ENOTSUP;
                break;
3299         default:
3300                 ret = -EINVAL;
                break;
3301         }
3302         if (ret != 0) {
3303                 DPAA_SEC_ERR("failed to configure session parameters");
3304                 /* Return session to mempool */
3305                 rte_mempool_put(mempool, sess_private_data);
3306                 return ret;
3307         }
3308
3309         set_sec_session_private_data(sess, sess_private_data);
3310
3311         return ret;
3312 }
3313
3314 /** Clear the session memory so it doesn't leave key material behind */
3315 static int
3316 dpaa_sec_security_session_destroy(void *dev,
3317                 struct rte_security_session *sess)
3318 {
3319         PMD_INIT_FUNC_TRACE();
3320         void *sess_priv = get_sec_session_private_data(sess);
3321         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3322
3323         if (sess_priv) {
3324                 free_session_memory((struct rte_cryptodev *)dev, s);
3325                 set_sec_session_private_data(sess, NULL);
3326         }
3327         return 0;
3328 }
3329 #endif

3330 static int
3331 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3332                        struct rte_cryptodev_config *config __rte_unused)
3333 {
3334         PMD_INIT_FUNC_TRACE();
3335
3336         return 0;
3337 }
3338
3339 static int
3340 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3341 {
3342         PMD_INIT_FUNC_TRACE();
3343         return 0;
3344 }
3345
3346 static void
3347 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3348 {
3349         PMD_INIT_FUNC_TRACE();
3350 }
3351
3352 static int
3353 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3354 {
3355         PMD_INIT_FUNC_TRACE();
3356
3357         if (dev == NULL)
3358                 return -EINVAL;
3359
3360         return 0;
3361 }
3362
3363 static void
3364 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3365                        struct rte_cryptodev_info *info)
3366 {
3367         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3368
3369         PMD_INIT_FUNC_TRACE();
3370         if (info != NULL) {
3371                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3372                 info->feature_flags = dev->feature_flags;
3373                 info->capabilities = dpaa_sec_capabilities;
3374                 info->sym.max_nb_sessions = internals->max_nb_sessions;
3375                 info->driver_id = dpaa_cryptodev_driver_id;
3376         }
3377 }
3378
3379 static enum qman_cb_dqrr_result
3380 dpaa_sec_process_parallel_event(void *event,
3381                         struct qman_portal *qm __always_unused,
3382                         struct qman_fq *outq,
3383                         const struct qm_dqrr_entry *dqrr,
3384                         void **bufs)
3385 {
3386         const struct qm_fd *fd;
3387         struct dpaa_sec_job *job;
3388         struct dpaa_sec_op_ctx *ctx;
3389         struct rte_event *ev = (struct rte_event *)event;
3390
3391         fd = &dqrr->fd;
3392
3393         /* sg is embedded in an op ctx,
3394          * sg[0] is for output
3395          * sg[1] for input
3396          */
3397         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3398
3399         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3400         ctx->fd_status = fd->status;
3401         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3402                 struct qm_sg_entry *sg_out;
3403                 uint32_t len;
3404
3405                 sg_out = &job->sg[0];
3406                 hw_sg_to_cpu(sg_out);
3407                 len = sg_out->length;
3408                 ctx->op->sym->m_src->pkt_len = len;
3409                 ctx->op->sym->m_src->data_len = len;
3410         }
3411         dpaa_sec_op_ending(ctx);
3417         ev->event_ptr = (void *)ctx->op;
3418
3419         ev->flow_id = outq->ev.flow_id;
3420         ev->sub_event_type = outq->ev.sub_event_type;
3421         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3422         ev->op = RTE_EVENT_OP_NEW;
3423         ev->sched_type = outq->ev.sched_type;
3424         ev->queue_id = outq->ev.queue_id;
3425         ev->priority = outq->ev.priority;
3426         *bufs = (void *)ctx->op;
3427
3428         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3429
3430         return qman_cb_dqrr_consume;
3431 }
3432
3433 static enum qman_cb_dqrr_result
3434 dpaa_sec_process_atomic_event(void *event,
3435                         struct qman_portal *qm __rte_unused,
3436                         struct qman_fq *outq,
3437                         const struct qm_dqrr_entry *dqrr,
3438                         void **bufs)
3439 {
3440         u8 index;
3441         const struct qm_fd *fd;
3442         struct dpaa_sec_job *job;
3443         struct dpaa_sec_op_ctx *ctx;
3444         struct rte_event *ev = (struct rte_event *)event;
3445
3446         fd = &dqrr->fd;
3447
3448         /* sg is embedded in an op ctx,
3449          * sg[0] is for output
3450          * sg[1] for input
3451          */
3452         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3453
3454         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3455         ctx->fd_status = fd->status;
3456         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3457                 struct qm_sg_entry *sg_out;
3458                 uint32_t len;
3459
3460                 sg_out = &job->sg[0];
3461                 hw_sg_to_cpu(sg_out);
3462                 len = sg_out->length;
3463                 ctx->op->sym->m_src->pkt_len = len;
3464                 ctx->op->sym->m_src->data_len = len;
3465         }
3466         dpaa_sec_op_ending(ctx);
3472         ev->event_ptr = (void *)ctx->op;
3473         ev->flow_id = outq->ev.flow_id;
3474         ev->sub_event_type = outq->ev.sub_event_type;
3475         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3476         ev->op = RTE_EVENT_OP_NEW;
3477         ev->sched_type = outq->ev.sched_type;
3478         ev->queue_id = outq->ev.queue_id;
3479         ev->priority = outq->ev.priority;
3480
3481         /* Save active dqrr entries */
3482         index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3483         DPAA_PER_LCORE_DQRR_SIZE++;
3484         DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3485         DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3486         ev->impl_opaque = index + 1;
3487         *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
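        /* Returning qman_cb_dqrr_defer below keeps this DQRR entry held;
         * it is consumed only once the application releases the atomic
         * event context, preserving per-flow atomic ordering.
         */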
3488         *bufs = (void *)ctx->op;
3489
3490         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3491
3492         return qman_cb_dqrr_defer;
3493 }
3494
3495 int
3496 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3497                 int qp_id,
3498                 uint16_t ch_id,
3499                 const struct rte_event *event)
3500 {
3501         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3502         struct qm_mcc_initfq opts = {0};
3503
3504         int ret;
3505
3506         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3507                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3508         opts.fqd.dest.channel = ch_id;
3509
3510         switch (event->sched_type) {
3511         case RTE_SCHED_TYPE_ATOMIC:
3512                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3513                 /* Clear the FQCTRL_AVOIDBLOCK bit as it is an
3514                  * unnecessary configuration when HOLD_ACTIVE is set
3515                  */
3516                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3517                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3518                 break;
3519         case RTE_SCHED_TYPE_ORDERED:
3520                 DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3521                 return -ENOTSUP;
3522         default:
3523                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3524                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3525                 break;
3526         }
3527
3528         ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3529         if (unlikely(ret)) {
3530                 DPAA_SEC_ERR("unable to init caam source fq!");
3531                 return ret;
3532         }
3533
3534         memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3535
3536         return 0;
3537 }
3538
3539 int
3540 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3541                         int qp_id)
3542 {
3543         struct qm_mcc_initfq opts = {0};
3544         int ret;
3545         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3546
3547         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3548                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3549         qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3550         qp->outq.cb.ern  = ern_sec_fq_handler;
3551         qman_retire_fq(&qp->outq, NULL);
3552         qman_oos_fq(&qp->outq);
3553         ret = qman_init_fq(&qp->outq, 0, &opts);
3554         if (ret)
3555                 DPAA_SEC_ERR("qman_init_fq failed: %d", ret);
3556         qp->outq.cb.dqrr = NULL;
3557
3558         return ret;
3559 }
3560
3561 static struct rte_cryptodev_ops crypto_ops = {
3562         .dev_configure        = dpaa_sec_dev_configure,
3563         .dev_start            = dpaa_sec_dev_start,
3564         .dev_stop             = dpaa_sec_dev_stop,
3565         .dev_close            = dpaa_sec_dev_close,
3566         .dev_infos_get        = dpaa_sec_dev_infos_get,
3567         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
3568         .queue_pair_release   = dpaa_sec_queue_pair_release,
3569         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
3570         .sym_session_configure    = dpaa_sec_sym_session_configure,
3571         .sym_session_clear        = dpaa_sec_sym_session_clear,
3572         /* Raw data-path API related operations */
3573         .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3574         .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3575 };
3576
3577 #ifdef RTE_LIB_SECURITY
3578 static const struct rte_security_capability *
3579 dpaa_sec_capabilities_get(void *device __rte_unused)
3580 {
3581         return dpaa_sec_security_cap;
3582 }
3583
3584 static const struct rte_security_ops dpaa_sec_security_ops = {
3585         .session_create = dpaa_sec_security_session_create,
3586         .session_update = NULL,
3587         .session_stats_get = NULL,
3588         .session_destroy = dpaa_sec_security_session_destroy,
3589         .set_pkt_metadata = NULL,
3590         .capabilities_get = dpaa_sec_capabilities_get
3591 };
3592 #endif

3593 static int
3594 dpaa_sec_uninit(struct rte_cryptodev *dev)
3595 {
3596         struct dpaa_sec_dev_private *internals;
3597
3598         if (dev == NULL)
3599                 return -ENODEV;
3600
3601         internals = dev->data->dev_private;
3602         rte_free(dev->security_ctx);
3603
3604         rte_free(internals);
3605
3606         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3607                       dev->data->name, rte_socket_id());
3608
3609         return 0;
3610 }
3611
3612 static int
3613 check_devargs_handler(__rte_unused const char *key, const char *value,
3614                       __rte_unused void *opaque)
3615 {
3616         dpaa_sec_dp_dump = atoi(value);
3617         if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
3618                 DPAA_SEC_WARN("Unsupported DPAA_SEC_DP_DUMP level, "
3619                               "defaulting to full dump");
3620                 dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
3621         }
3622
3623         return 0;
3624 }
3625
3626 static void
3627 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
3628 {
3629         struct rte_kvargs *kvlist;
3630
3631         if (!devargs)
3632                 return;
3633
3634         kvlist = rte_kvargs_parse(devargs->args, NULL);
3635         if (!kvlist)
3636                 return;
3637
3638         if (!rte_kvargs_count(kvlist, key)) {
3639                 rte_kvargs_free(kvlist);
3640                 return;
3641         }
3642
3643         rte_kvargs_process(kvlist, key,
3644                                 check_devargs_handler, NULL);
3645         rte_kvargs_free(kvlist);
3646 }
3647
3648 static int
3649 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3650 {
3651         struct dpaa_sec_dev_private *internals;
3652 #ifdef RTE_LIB_SECURITY
3653         struct rte_security_ctx *security_instance;
3654 #endif
3655         struct dpaa_sec_qp *qp;
3656         uint32_t i, flags;
3657         int ret;
3658         void *cmd_map;
3659         int map_fd = -1;
3660
3661         PMD_INIT_FUNC_TRACE();
3662
3663         internals = cryptodev->data->dev_private;
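        /* Map the SEC CCSR register block via /dev/mem and set QICTL[DQEN]
         * so the queue interface between QMan and CAAM is physically
         * enabled; the mapping is needed only for this one register write.
         */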
3664         map_fd = open("/dev/mem", O_RDWR);
3665         if (unlikely(map_fd < 0)) {
3666                 DPAA_SEC_ERR("Unable to open (/dev/mem)");
3667                 return map_fd;
3668         }
3669         internals->sec_hw = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
3670                             MAP_SHARED, map_fd, SEC_BASE_ADDR);
3671         if (internals->sec_hw == MAP_FAILED) {
3672                 DPAA_SEC_ERR("Memory map failed");
3673                 close(map_fd);
3674                 return -EINVAL;
3675         }
3676         cmd_map = (uint8_t *)internals->sec_hw +
3677                   (BLOCK_OFFSET * QI_BLOCK_NUMBER) + CMD_REG;
3678         if (!(be32_to_cpu(rte_read32(cmd_map)) & QICTL_DQEN))
3679                 /* enable QI interface */
3680                 rte_write32(cpu_to_be32(QICTL_DQEN), cmd_map);
3681
3682         ret = munmap(internals->sec_hw, MAP_SIZE);
3683         if (ret)
3684                 DPAA_SEC_WARN("munmap failed");
3685
3686         close(map_fd);
3687         cryptodev->driver_id = dpaa_cryptodev_driver_id;
3688         cryptodev->dev_ops = &crypto_ops;
3689
3690         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3691         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3692         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3693                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3694                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3695                         RTE_CRYPTODEV_FF_SECURITY |
3696                         RTE_CRYPTODEV_FF_SYM_RAW_DP |
3697                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3698                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3699                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3700                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3701                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3702
3703         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3704         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3705
3706         /*
3707          * For secondary processes, don't initialise any further as the
3708          * primary process has already done this work.
3709          */
3711         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3712                 DPAA_SEC_WARN("Device already initialised by primary process");
3713                 return 0;
3714         }
3715 #ifdef RTE_LIB_SECURITY
3716         /* Initialize security_ctx only for primary process*/
3717         security_instance = rte_malloc("rte_security_instances_ops",
3718                                 sizeof(struct rte_security_ctx), 0);
3719         if (security_instance == NULL)
3720                 return -ENOMEM;
3721         security_instance->device = (void *)cryptodev;
3722         security_instance->ops = &dpaa_sec_security_ops;
3723         security_instance->sess_cnt = 0;
3724         cryptodev->security_ctx = security_instance;
3725 #endif
3726         rte_spinlock_init(&internals->lock);
3727         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3728                 /* init qman fq for queue pair */
3729                 qp = &internals->qps[i];
3730                 ret = dpaa_sec_init_tx(&qp->outq);
3731                 if (ret) {
3732                         DPAA_SEC_ERR("failed to configure tx of queue pair %d", i);
3733                         goto init_error;
3734                 }
3735         }
3736
3737         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3738                 QMAN_FQ_FLAG_TO_DCPORTAL;
3739         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3740                 /* create rx qman fq for sessions*/
3741                 ret = qman_create_fq(0, flags, &internals->inq[i]);
3742                 if (unlikely(ret != 0)) {
3743                         DPAA_SEC_ERR("sec qman_create_fq failed");
3744                         goto init_error;
3745                 }
3746         }
3747
3748         dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
3749
3750         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3751         return 0;
3752
3753 init_error:
3754         DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3755
3756         rte_free(cryptodev->security_ctx);
3757         return -EFAULT;
3758 }
3759
3760 static int
3761 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3762                                 struct rte_dpaa_device *dpaa_dev)
3763 {
3764         struct rte_cryptodev *cryptodev;
3765         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3766
3767         int retval;
3768
3769         snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3770
3771         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3772         if (cryptodev == NULL)
3773                 return -ENOMEM;
3774
3775         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3776                 cryptodev->data->dev_private = rte_zmalloc_socket(
3777                                         "cryptodev private structure",
3778                                         sizeof(struct dpaa_sec_dev_private),
3779                                         RTE_CACHE_LINE_SIZE,
3780                                         rte_socket_id());
3781
3782                 if (cryptodev->data->dev_private == NULL)
3783                         rte_panic("Cannot allocate memzone for private "
3784                                         "device data");
3785         }
3786
3787         dpaa_dev->crypto_dev = cryptodev;
3788         cryptodev->device = &dpaa_dev->device;
3789
3790         /* init user callbacks */
3791         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3792
3793         /* if sec device version is not configured */
3794         if (!rta_get_sec_era()) {
3795                 const struct device_node *caam_node;
3796
3797                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3798                         const uint32_t *prop = of_get_property(caam_node,
3799                                         "fsl,sec-era",
3800                                         NULL);
3801                         if (prop) {
3802                                 rta_set_sec_era(
3803                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3804                                 break;
3805                         }
3806                 }
3807         }
3808
3809         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3810                 retval = rte_dpaa_portal_init((void *)1);
3811                 if (retval) {
3812                         DPAA_SEC_ERR("Unable to initialize portal");
3813                         goto out;
3814                 }
3815         }
3816
3817         /* Invoke PMD device initialization function */
3818         retval = dpaa_sec_dev_init(cryptodev);
3819         if (retval == 0) {
3820                 rte_cryptodev_pmd_probing_finish(cryptodev);
3821                 return 0;
3822         }
3823
3824         retval = -ENXIO;
3825 out:
3826         /* In case of error, clean up whatever was allocated */
3827         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3828                 rte_free(cryptodev->data->dev_private);
3829
3830         rte_cryptodev_pmd_release_device(cryptodev);
3831
3832         return retval;
3833 }
3834
3835 static int
3836 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3837 {
3838         struct rte_cryptodev *cryptodev;
3839         int ret;
3840
3841         cryptodev = dpaa_dev->crypto_dev;
3842         if (cryptodev == NULL)
3843                 return -ENODEV;
3844
3845         ret = dpaa_sec_uninit(cryptodev);
3846         if (ret)
3847                 return ret;
3848
3849         return rte_cryptodev_pmd_destroy(cryptodev);
3850 }
3851
3852 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3853         .drv_type = FSL_DPAA_CRYPTO,
3854         .driver = {
3855                 .name = "DPAA SEC PMD"
3856         },
3857         .probe = cryptodev_dpaa_sec_probe,
3858         .remove = cryptodev_dpaa_sec_remove,
3859 };
3860
3861 static struct cryptodev_driver dpaa_sec_crypto_drv;
3862
3863 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3864 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3865                 dpaa_cryptodev_driver_id);
3866 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
3867                 DRIVER_DUMP_MODE "=<int>");
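
/*
 * Illustrative devargs usage for the dump-mode knob (the bus prefix and
 * device name below are examples only and depend on the platform):
 *
 *	dpdk-app -a dpaa_bus:dpaa_sec-1,drv_dump_mode=2
 *
 * 0 = no dump, 1 = dump on error (the default), 2 = full dump.
 */
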
3868 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);