/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2021 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_ip.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

#define DRIVER_DUMP_MODE "drv_dump_mode"

/* DPAA_SEC_DP_DUMP levels */
enum dpaa_sec_dump_levels {
	DPAA_SEC_DP_NO_DUMP,
	DPAA_SEC_DP_ERR_DUMP,
	DPAA_SEC_DP_FULL_DUMP
};

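/*
 * Runtime dump level. Defaults to dumping only errored FDs; the
 * DRIVER_DUMP_MODE ("drv_dump_mode") devarg defined above is presumably
 * the knob that overrides this default (parsed elsewhere in this driver).
 */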
uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;

uint8_t dpaa_cryptodev_driver_id;

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (retval || !ctx) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called four times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
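	/*
	 * Each iteration below covers 4 entries (4 * 16 B = 64 B) per
	 * dcbz_64() call; e.g. sg_count = 10 gives i = 0, 4, 8, zeroing
	 * entries 0-11.
	 */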
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as destination so that
 * all packets on this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
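
/*
 * Resulting data path, as configured above (sketch):
 *
 *   enqueue FD -> fq_in (dest = CAAM channel, context_a = shared descriptor)
 *                 -> SEC/CAAM processes the job
 *                 -> result delivered on the FQ named by context_b
 *                    (fqid_out), serviced by dqrr_out_fq_cb_rx() below
 */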

/* Frames are enqueued on in_fq; CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx:
	 * sg[0] is for output,
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

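		/*
		 * Protocol offload can change the frame length; propagate
		 * the HW output length back into the mbuf chain. E.g. a
		 * 3-seg chain of data_len 64/64/64 with sg_out->length 150
		 * becomes 64/64/22 with pkt_len 150.
		 */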
		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* CAAM results are put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (ses->pdcp.sdap_enabled) {
		int nb_keys_to_inline =
				rta_inline_pdcp_sdap_query(authdata.algtype,
					cipherdata.algtype,
					ses->pdcp.sn_size,
					ses->pdcp.hfn_ovd);
		if (nb_keys_to_inline >= 1) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		if (nb_keys_to_inline >= 2) {
			authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;
		}
	} else {
		if (rta_inline_pdcp_query(authdata.algtype,
					cipherdata.algtype,
					ses->pdcp.sn_size,
					ses->pdcp.hfn_ovd)) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
	}

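	/*
	 * Shared-descriptor constructor selection below (summary):
	 *   PDCP domain   direction   constructor
	 *   CONTROL       ENC         cnstr_shdsc_pdcp_c_plane_encap()
	 *   CONTROL       DEC         cnstr_shdsc_pdcp_c_plane_decap()
	 *   SHORT_MAC     any         cnstr_shdsc_pdcp_short_mac()
	 *   user plane    ENC/DEC     cnstr_shdsc_pdcp[_sdap]_u_plane_*()
	 */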
	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata);
	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
						     1, swap, &authdata);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
		}
	}
	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
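	/*
	 * rta_inline_query() reports, via bits in sh_desc[2], which keys fit
	 * inline in the descriptor: bit 0 for the cipher key, bit 1 for the
	 * auth key. A key that does not fit is referenced by pointer instead.
	 */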
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5:
		case RTE_CRYPTO_AUTH_SHA1:
		case RTE_CRYPTO_AUTH_SHA224:
		case RTE_CRYPTO_AUTH_SHA256:
		case RTE_CRYPTO_AUTH_SHA384:
		case RTE_CRYPTO_AUTH_SHA512:
			shared_desc_len = cnstr_shdsc_hash(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			shared_desc_len = cnstr_shdsc_aes_mac(
						cdb->sh_desc,
						true, swap, SHR_NEVER,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		/* reject unsupported AEAD algorithms */
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("unsupported AEAD alg");
			return -ENOTSUP;
		}

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

static void
dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
{
	struct dpaa_sec_job *job = &ctx->job;
	struct rte_crypto_op *op = ctx->op;
	dpaa_sec_session *sess = NULL;
	struct sec_cdb c_cdb, *cdb;
	uint8_t bufsize;
	struct rte_crypto_sym_op *sym_op;
	struct qm_sg_entry sg[2];

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa_sec_session *)
			get_sym_session_private_data(
					op->sym->session,
					dpaa_cryptodev_driver_id);
#ifdef RTE_LIB_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa_sec_session *)
			get_sec_session_private_data(
					op->sym->sec_session);
#endif
	if (sess == NULL) {
		printf("session is NULL\n");
		goto mbuf_dump;
	}

	cdb = &sess->cdb;
	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
#ifdef RTE_LIB_SECURITY
	printf("\nsession protocol type = %d\n", sess->proto_alg);
#endif
	printf("\n****************************************\n"
		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
		"\tCipher key len:\t%"PRIu64"\n\tCipher key alg:\t%d\n"
		"\tCipher algmode:\t%d\n", sess->ctxt,
		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
		(uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
		sess->cipher_key.algmode);
	rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
			sess->cipher_key.length);
	rte_hexdump(stdout, "auth key", sess->auth_key.data,
			sess->auth_key.length);
	printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
		"\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
		"\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
		"\taead cipher text:\t%d\n",
		(uint64_t)sess->auth_key.length, sess->auth_key.alg,
		sess->auth_key.algmode,
		sess->iv.length, sess->iv.offset,
		sess->digest_length, sess->auth_only_len,
		sess->auth_cipher_text);
#ifdef RTE_LIB_SECURITY
	printf("PDCP session params:\n"
		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
		"\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
		"\t%d\n\thfn:\t\t%d\n"
		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
		sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
		sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
		sess->pdcp.hfn_threshold);
#endif
	c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
	c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
	bufsize = c_cdb.sh_hdr.hi.field.idlen;

	printf("cdb = %p\n\n", cdb);
	printf("Descriptor size = %d\n", bufsize);
	int m;
	for (m = 0; m < bufsize; m++)
		printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));

	printf("\n");
mbuf_dump:
	sym_op = op->sym;
	if (sym_op->m_src) {
		printf("Source mbuf:\n");
		rte_pktmbuf_dump(stdout, sym_op->m_src,
				 sym_op->m_src->data_len);
	}
	if (sym_op->m_dst) {
		printf("Destination mbuf:\n");
		rte_pktmbuf_dump(stdout, sym_op->m_dst,
				 sym_op->m_dst->data_len);
	}

	printf("Session address = %p\ncipher offset: %d, length: %d\n"
		"auth offset: %d, length: %d\naead offset: %d, length: %d\n",
		sym_op->session, sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sym_op->auth.data.offset, sym_op->auth.data.length,
		sym_op->aead.data.offset, sym_op->aead.data.length);
	printf("\n");

	printf("******************************************************\n");
	printf("ctx info:\n");
	printf("job->sg[0] output info:\n");
	memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
		"\n\tbpid = %d\n\toffset = %d\n",
		(uint64_t)sg[0].addr, sg[0].length, sg[0].final,
		sg[0].extension, sg[0].bpid, sg[0].offset);
	printf("\njob->sg[1] input info:\n");
	memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
	hw_sg_to_cpu(&sg[1]);
	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
		"\n\tbpid = %d\n\toffset = %d\n",
		(uint64_t)sg[1].addr, sg[1].length, sg[1].final,
		sg[1].extension, sg[1].bpid, sg[1].offset);

	printf("\nctx pool addr = %p\n", ctx->ctx_pool);
	if (ctx->ctx_pool)
		printf("ctx pool available count = %d\n",
			rte_mempool_avail_count(ctx->ctx_pool));

	printf("\nop pool addr = %p\n", op->mempool);
	if (op->mempool)
		printf("op pool available count = %d\n",
			rte_mempool_avail_count(op->mempool));

	printf("********************************************************\n");
	printf("Queue data:\n");
	printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
		"\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
		" = %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
		qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
		qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
		qp->rx_errs, qp->tx_errs);
}

/* qp is lockless; it should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide exactly the
	 * number requested. Otherwise we do not set the QM_VDQCR_EXACT flag;
	 * without it the dequeue can return up to two more buffers than
	 * requested, so we ask for two fewer in that case.
	 */
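	/*
	 * E.g. nb_ops = 3 requests exactly 3 (QM_VDQCR_EXACT); nb_ops = 32
	 * requests 30 without the flag, so at most 32 can come back
	 * (assuming nb_ops stays below DPAA_MAX_DEQUEUE_NUM_FRAMES).
	 */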
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx:
		 * sg[0] is for output,
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
						op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
				DPAA_SEC_DP_WARN("SEC return err: 0x%x\n",
						 ctx->fd_status);
				if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
					dpaa_sec_dump(ctx, qp);
			}
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* op status has been reported; free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

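	/*
	 * Compound frame layout built below (sketch):
	 *   sg[0] (output): digest buffer, ses->digest_length bytes
	 *   sg[1] (input, extension -> sg[2..]):
	 *     [f9/EIA3 IV, if any][data segments...][old digest, decode only]
	 */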
	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

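	/*
	 * Compound frame layout built below (sketch):
	 *   sg[0] (output, extension -> sg[2..]): destination data segments
	 *   sg[1] (input, extension): [IV][source data segments...]
	 */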
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

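	/*
	 * Compound frame layout built below (sketch):
	 *   sg[0] (output, extension -> sg[2..]):
	 *     [data segments...][digest, encode only]
	 *   sg[1] (input, extension):
	 *     [IV][AAD, if auth_only_len][data segments...][digest, decode only]
	 */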
1324         /* output */
1325         out_sg = &cf->sg[0];
1326         out_sg->extension = 1;
1327         if (is_encode(ses))
1328                 out_sg->length = sym->aead.data.length + ses->digest_length;
1329         else
1330                 out_sg->length = sym->aead.data.length;
1331
1332         /* output sg entries */
1333         sg = &cf->sg[2];
1334         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1335         cpu_to_hw_sg(out_sg);
1336
1337         /* 1st seg */
1338         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1339         sg->length = mbuf->data_len - sym->aead.data.offset;
1340         sg->offset = sym->aead.data.offset;
1341
1342         /* Successive segs */
1343         mbuf = mbuf->next;
1344         while (mbuf) {
1345                 cpu_to_hw_sg(sg);
1346                 sg++;
1347                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1348                 sg->length = mbuf->data_len;
1349                 mbuf = mbuf->next;
1350         }
1351         sg->length -= ses->digest_length;
1352
1353         if (is_encode(ses)) {
1354                 cpu_to_hw_sg(sg);
1355                 /* set auth output */
1356                 sg++;
1357                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1358                 sg->length = ses->digest_length;
1359         }
1360         sg->final = 1;
1361         cpu_to_hw_sg(sg);
1362
1363         /* input */
1364         mbuf = sym->m_src;
1365         in_sg = &cf->sg[1];
1366         in_sg->extension = 1;
1367         in_sg->final = 1;
1368         if (is_encode(ses))
1369                 in_sg->length = ses->iv.length + sym->aead.data.length
1370                                                         + ses->auth_only_len;
1371         else
1372                 in_sg->length = ses->iv.length + sym->aead.data.length
1373                                 + ses->auth_only_len + ses->digest_length;
1374
1375         /* input sg entries */
1376         sg++;
1377         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1378         cpu_to_hw_sg(in_sg);
1379
1380         /* 1st seg IV */
1381         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1382         sg->length = ses->iv.length;
1383         cpu_to_hw_sg(sg);
1384
1385         /* 2nd seg auth only */
1386         if (ses->auth_only_len) {
1387                 sg++;
1388                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1389                 sg->length = ses->auth_only_len;
1390                 cpu_to_hw_sg(sg);
1391         }
1392
1393         /* 3rd seg */
1394         sg++;
1395         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1396         sg->length = mbuf->data_len - sym->aead.data.offset;
1397         sg->offset = sym->aead.data.offset;
1398
1399         /* Successive segs */
1400         mbuf = mbuf->next;
1401         while (mbuf) {
1402                 cpu_to_hw_sg(sg);
1403                 sg++;
1404                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1405                 sg->length = mbuf->data_len;
1406                 mbuf = mbuf->next;
1407         }
1408
1409         if (is_decode(ses)) {
1410                 cpu_to_hw_sg(sg);
1411                 sg++;
1412                 memcpy(ctx->digest, sym->aead.digest.data,
1413                         ses->digest_length);
1414                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1415                 sg->length = ses->digest_length;
1416         }
1417         sg->final = 1;
1418         cpu_to_hw_sg(sg);
1419
1420         return cf;
1421 }
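/*
 * Illustrative sketch (not part of the driver): an AEAD operation that
 * reaches build_cipher_auth_gcm_sg() above would typically be prepared
 * by the application as below; 'op', 'sess', 'm_chain', 'aad_buf',
 * 'payload_len', 'tag' and 'tag_iova' are hypothetical names.
 *
 *	op->sym->m_src = m_chain;                 // multi-segment mbuf
 *	op->sym->aead.data.offset = 0;
 *	op->sym->aead.data.length = payload_len;  // bytes to cipher
 *	op->sym->aead.aad.data = aad_buf;         // ses->auth_only_len bytes
 *	op->sym->aead.digest.data = tag;
 *	op->sym->aead.digest.phys_addr = tag_iova;
 *	rte_crypto_op_attach_sym_session(op, sess);
 */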
1422
1423 static inline struct dpaa_sec_job *
1424 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1425 {
1426         struct rte_crypto_sym_op *sym = op->sym;
1427         struct dpaa_sec_job *cf;
1428         struct dpaa_sec_op_ctx *ctx;
1429         struct qm_sg_entry *sg;
1430         uint32_t length = 0;
1431         rte_iova_t src_start_addr, dst_start_addr;
1432         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1433                         ses->iv.offset);
1434
1435         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1436
1437         if (sym->m_dst)
1438                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1439         else
1440                 dst_start_addr = src_start_addr;
1441
1442         ctx = dpaa_sec_alloc_ctx(ses, 7);
1443         if (!ctx)
1444                 return NULL;
1445
1446         cf = &ctx->job;
1447         ctx->op = op;
1448
1449         /* input */
1450         rte_prefetch0(cf->sg);
1451         sg = &cf->sg[2];
1452         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1453         if (is_encode(ses)) {
1454                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1455                 sg->length = ses->iv.length;
1456                 length += sg->length;
1457                 cpu_to_hw_sg(sg);
1458
1459                 sg++;
1460                 if (ses->auth_only_len) {
1461                         qm_sg_entry_set64(sg,
1462                                           rte_dpaa_mem_vtop(sym->aead.aad.data));
1463                         sg->length = ses->auth_only_len;
1464                         length += sg->length;
1465                         cpu_to_hw_sg(sg);
1466                         sg++;
1467                 }
1468                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1469                 sg->length = sym->aead.data.length;
1470                 length += sg->length;
1471                 sg->final = 1;
1472                 cpu_to_hw_sg(sg);
1473         } else {
1474                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1475                 sg->length = ses->iv.length;
1476                 length += sg->length;
1477                 cpu_to_hw_sg(sg);
1478
1479                 sg++;
1480                 if (ses->auth_only_len) {
1481                         qm_sg_entry_set64(sg,
1482                                           rte_dpaa_mem_vtop(sym->aead.aad.data));
1483                         sg->length = ses->auth_only_len;
1484                         length += sg->length;
1485                         cpu_to_hw_sg(sg);
1486                         sg++;
1487                 }
1488                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1489                 sg->length = sym->aead.data.length;
1490                 length += sg->length;
1491                 cpu_to_hw_sg(sg);
1492
1493                 memcpy(ctx->digest, sym->aead.digest.data,
1494                        ses->digest_length);
1495                 sg++;
1496
1497                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1498                 sg->length = ses->digest_length;
1499                 length += sg->length;
1500                 sg->final = 1;
1501                 cpu_to_hw_sg(sg);
1502         }
1503         /* input compound frame */
1504         cf->sg[1].length = length;
1505         cf->sg[1].extension = 1;
1506         cf->sg[1].final = 1;
1507         cpu_to_hw_sg(&cf->sg[1]);
1508
1509         /* output */
1510         sg++;
1511         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1512         qm_sg_entry_set64(sg,
1513                 dst_start_addr + sym->aead.data.offset);
1514         sg->length = sym->aead.data.length;
1515         length = sg->length;
1516         if (is_encode(ses)) {
1517                 cpu_to_hw_sg(sg);
1518                 /* set auth output */
1519                 sg++;
1520                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1521                 sg->length = ses->digest_length;
1522                 length += sg->length;
1523         }
1524         sg->final = 1;
1525         cpu_to_hw_sg(sg);
1526
1527         /* output compound frame */
1528         cf->sg[0].length = length;
1529         cf->sg[0].extension = 1;
1530         cpu_to_hw_sg(&cf->sg[0]);
1531
1532         return cf;
1533 }
1534
1535 static inline struct dpaa_sec_job *
1536 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1537 {
1538         struct rte_crypto_sym_op *sym = op->sym;
1539         struct dpaa_sec_job *cf;
1540         struct dpaa_sec_op_ctx *ctx;
1541         struct qm_sg_entry *sg, *out_sg, *in_sg;
1542         struct rte_mbuf *mbuf;
1543         uint8_t req_segs;
1544         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1545                         ses->iv.offset);
1546
1547         if (sym->m_dst) {
1548                 mbuf = sym->m_dst;
1549                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1550         } else {
1551                 mbuf = sym->m_src;
1552                 req_segs = mbuf->nb_segs * 2 + 4;
1553         }
1554
1555         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1556                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1557                                 MAX_SG_ENTRIES);
1558                 return NULL;
1559         }
1560
1561         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1562         if (!ctx)
1563                 return NULL;
1564
1565         cf = &ctx->job;
1566         ctx->op = op;
1567
1568         rte_prefetch0(cf->sg);
1569
1570         /* output */
1571         out_sg = &cf->sg[0];
1572         out_sg->extension = 1;
1573         if (is_encode(ses))
1574                 out_sg->length = sym->auth.data.length + ses->digest_length;
1575         else
1576                 out_sg->length = sym->auth.data.length;
1577
1578         /* output sg entries */
1579         sg = &cf->sg[2];
1580         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1581         cpu_to_hw_sg(out_sg);
1582
1583         /* 1st seg */
1584         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1585         sg->length = mbuf->data_len - sym->auth.data.offset;
1586         sg->offset = sym->auth.data.offset;
1587
1588         /* Successive segs */
1589         mbuf = mbuf->next;
1590         while (mbuf) {
1591                 cpu_to_hw_sg(sg);
1592                 sg++;
1593                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1594                 sg->length = mbuf->data_len;
1595                 mbuf = mbuf->next;
1596         }
1597         sg->length -= ses->digest_length;
1598
1599         if (is_encode(ses)) {
1600                 cpu_to_hw_sg(sg);
1601                 /* set auth output */
1602                 sg++;
1603                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1604                 sg->length = ses->digest_length;
1605         }
1606         sg->final = 1;
1607         cpu_to_hw_sg(sg);
1608
1609         /* input */
1610         mbuf = sym->m_src;
1611         in_sg = &cf->sg[1];
1612         in_sg->extension = 1;
1613         in_sg->final = 1;
1614         if (is_encode(ses))
1615                 in_sg->length = ses->iv.length + sym->auth.data.length;
1616         else
1617                 in_sg->length = ses->iv.length + sym->auth.data.length
1618                                                 + ses->digest_length;
1619
1620         /* input sg entries */
1621         sg++;
1622         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1623         cpu_to_hw_sg(in_sg);
1624
1625         /* 1st seg IV */
1626         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1627         sg->length = ses->iv.length;
1628         cpu_to_hw_sg(sg);
1629
1630         /* 2nd seg */
1631         sg++;
1632         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1633         sg->length = mbuf->data_len - sym->auth.data.offset;
1634         sg->offset = sym->auth.data.offset;
1635
1636         /* Successive segs */
1637         mbuf = mbuf->next;
1638         while (mbuf) {
1639                 cpu_to_hw_sg(sg);
1640                 sg++;
1641                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1642                 sg->length = mbuf->data_len;
1643                 mbuf = mbuf->next;
1644         }
1645
1646         sg->length -= ses->digest_length;
1647         if (is_decode(ses)) {
1648                 cpu_to_hw_sg(sg);
1649                 sg++;
1650                 memcpy(ctx->digest, sym->auth.digest.data,
1651                         ses->digest_length);
1652                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1653                 sg->length = ses->digest_length;
1654         }
1655         sg->final = 1;
1656         cpu_to_hw_sg(sg);
1657
1658         return cf;
1659 }
1660
1661 static inline struct dpaa_sec_job *
1662 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1663 {
1664         struct rte_crypto_sym_op *sym = op->sym;
1665         struct dpaa_sec_job *cf;
1666         struct dpaa_sec_op_ctx *ctx;
1667         struct qm_sg_entry *sg;
1668         rte_iova_t src_start_addr, dst_start_addr;
1669         uint32_t length = 0;
1670         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1671                         ses->iv.offset);
1672
1673         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1674         if (sym->m_dst)
1675                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1676         else
1677                 dst_start_addr = src_start_addr;
1678
1679         ctx = dpaa_sec_alloc_ctx(ses, 7);
1680         if (!ctx)
1681                 return NULL;
1682
1683         cf = &ctx->job;
1684         ctx->op = op;
1685
1686         /* input */
1687         rte_prefetch0(cf->sg);
1688         sg = &cf->sg[2];
1689         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1690         if (is_encode(ses)) {
1691                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1692                 sg->length = ses->iv.length;
1693                 length += sg->length;
1694                 cpu_to_hw_sg(sg);
1695
1696                 sg++;
1697                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1698                 sg->length = sym->auth.data.length;
1699                 length += sg->length;
1700                 sg->final = 1;
1701                 cpu_to_hw_sg(sg);
1702         } else {
1703                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1704                 sg->length = ses->iv.length;
1705                 length += sg->length;
1706                 cpu_to_hw_sg(sg);
1707
1708                 sg++;
1709
1710                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1711                 sg->length = sym->auth.data.length;
1712                 length += sg->length;
1713                 cpu_to_hw_sg(sg);
1714
1715                 memcpy(ctx->digest, sym->auth.digest.data,
1716                        ses->digest_length);
1717                 sg++;
1718
1719                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1720                 sg->length = ses->digest_length;
1721                 length += sg->length;
1722                 sg->final = 1;
1723                 cpu_to_hw_sg(sg);
1724         }
1725         /* input compound frame */
1726         cf->sg[1].length = length;
1727         cf->sg[1].extension = 1;
1728         cf->sg[1].final = 1;
1729         cpu_to_hw_sg(&cf->sg[1]);
1730
1731         /* output */
1732         sg++;
1733         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1734         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1735         sg->length = sym->cipher.data.length;
1736         length = sg->length;
1737         if (is_encode(ses)) {
1738                 cpu_to_hw_sg(sg);
1739                 /* set auth output */
1740                 sg++;
1741                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1742                 sg->length = ses->digest_length;
1743                 length += sg->length;
1744         }
1745         sg->final = 1;
1746         cpu_to_hw_sg(sg);
1747
1748         /* output compound frame */
1749         cf->sg[0].length = length;
1750         cf->sg[0].extension = 1;
1751         cpu_to_hw_sg(&cf->sg[0]);
1752
1753         return cf;
1754 }
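/*
 * Illustrative sketch (not part of the driver): for the chained
 * cipher+auth builders above, the auth region may start before and/or
 * end after the cipher region; the enqueue path derives auth_hdr_len
 * and auth_tail_len from these offsets. Hypothetical values shown:
 *
 *	op->sym->auth.data.offset = 0;           // MAC also covers the header
 *	op->sym->auth.data.length = hdr_len + payload_len;
 *	op->sym->cipher.data.offset = hdr_len;   // only the payload is ciphered
 *	op->sym->cipher.data.length = payload_len;
 */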
1755
1756 #ifdef RTE_LIB_SECURITY
1757 static inline struct dpaa_sec_job *
1758 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1759 {
1760         struct rte_crypto_sym_op *sym = op->sym;
1761         struct dpaa_sec_job *cf;
1762         struct dpaa_sec_op_ctx *ctx;
1763         struct qm_sg_entry *sg;
1764         phys_addr_t src_start_addr, dst_start_addr;
1765
1766         ctx = dpaa_sec_alloc_ctx(ses, 2);
1767         if (!ctx)
1768                 return NULL;
1769         cf = &ctx->job;
1770         ctx->op = op;
1771
1772         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1773
1774         if (sym->m_dst)
1775                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1776         else
1777                 dst_start_addr = src_start_addr;
1778
1779         /* input */
1780         sg = &cf->sg[1];
1781         qm_sg_entry_set64(sg, src_start_addr);
1782         sg->length = sym->m_src->pkt_len;
1783         sg->final = 1;
1784         cpu_to_hw_sg(sg);
1785
1786         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1787         /* output */
1788         sg = &cf->sg[0];
1789         qm_sg_entry_set64(sg, dst_start_addr);
1790         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1791         cpu_to_hw_sg(sg);
1792
1793         return cf;
1794 }
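/*
 * The output entry above spans the whole free buffer (buf_len - data_off)
 * rather than just pkt_len: in protocol offload the SEC engine may grow
 * the packet (tunnel headers, trailers, MAC-I), so the full room behind
 * data_off is made available to it.
 */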
1795
1796 static inline struct dpaa_sec_job *
1797 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1798 {
1799         struct rte_crypto_sym_op *sym = op->sym;
1800         struct dpaa_sec_job *cf;
1801         struct dpaa_sec_op_ctx *ctx;
1802         struct qm_sg_entry *sg, *out_sg, *in_sg;
1803         struct rte_mbuf *mbuf;
1804         uint8_t req_segs;
1805         uint32_t in_len = 0, out_len = 0;
1806
1807         if (sym->m_dst)
1808                 mbuf = sym->m_dst;
1809         else
1810                 mbuf = sym->m_src;
1811
1812         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1813         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1814                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1815                                 MAX_SG_ENTRIES);
1816                 return NULL;
1817         }
1818
1819         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1820         if (!ctx)
1821                 return NULL;
1822         cf = &ctx->job;
1823         ctx->op = op;
1824         /* output */
1825         out_sg = &cf->sg[0];
1826         out_sg->extension = 1;
1827         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1828
1829         /* 1st seg */
1830         sg = &cf->sg[2];
1831         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1832         sg->offset = 0;
1833
1834         /* Successive segs */
1835         while (mbuf->next) {
1836                 sg->length = mbuf->data_len;
1837                 out_len += sg->length;
1838                 mbuf = mbuf->next;
1839                 cpu_to_hw_sg(sg);
1840                 sg++;
1841                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1842                 sg->offset = 0;
1843         }
1844         sg->length = mbuf->buf_len - mbuf->data_off;
1845         out_len += sg->length;
1846         sg->final = 1;
1847         cpu_to_hw_sg(sg);
1848
1849         out_sg->length = out_len;
1850         cpu_to_hw_sg(out_sg);
1851
1852         /* input */
1853         mbuf = sym->m_src;
1854         in_sg = &cf->sg[1];
1855         in_sg->extension = 1;
1856         in_sg->final = 1;
1857         in_len = mbuf->data_len;
1858
1859         sg++;
1860         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1861
1862         /* 1st seg */
1863         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1864         sg->length = mbuf->data_len;
1865         sg->offset = 0;
1866
1867         /* Successive segs */
1868         mbuf = mbuf->next;
1869         while (mbuf) {
1870                 cpu_to_hw_sg(sg);
1871                 sg++;
1872                 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1873                 sg->length = mbuf->data_len;
1874                 sg->offset = 0;
1875                 in_len += sg->length;
1876                 mbuf = mbuf->next;
1877         }
1878         sg->final = 1;
1879         cpu_to_hw_sg(sg);
1880
1881         in_sg->length = in_len;
1882         cpu_to_hw_sg(in_sg);
1883
1884         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1885
1886         return cf;
1887 }
1888 #endif
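/*
 * Illustrative sketch (not part of the driver): protocol-offload
 * (IPsec/PDCP) operations reach build_proto()/build_proto_sg() through a
 * security session attached to the op; 'op', 'm' and 'sec_sess' are
 * hypothetical names.
 *
 *	op->sym->m_src = m;
 *	rte_security_attach_session(op, sec_sess);
 *	// rte_security_attach_session() also sets op->sess_type to
 *	// RTE_CRYPTO_OP_SECURITY_SESSION
 */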
1889
1890 static uint16_t
1891 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1892                        uint16_t nb_ops)
1893 {
1894         /* Function to transmit the frames to the given device and queue pair */
1895         uint32_t loop;
1896         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1897         uint16_t num_tx = 0, ops_count = nb_ops;
1898         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1899         uint32_t frames_to_send;
1900         struct rte_crypto_op *op;
1901         struct dpaa_sec_job *cf;
1902         dpaa_sec_session *ses;
1903         uint16_t auth_hdr_len, auth_tail_len;
1904         uint32_t index, flags[DPAA_SEC_BURST] = {0};
1905         struct qman_fq *inq[DPAA_SEC_BURST];
1906
1907         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1908                 if (rte_dpaa_portal_init((void *)0)) {
1909                         DPAA_SEC_ERR("Failure in affining portal");
1910                         return 0;
1911                 }
1912         }
1913
1914         while (nb_ops) {
1915                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1916                                 DPAA_SEC_BURST : nb_ops;
1917                 for (loop = 0; loop < frames_to_send; loop++) {
1918                         op = *(ops++);
1919                         if (*dpaa_seqn(op->sym->m_src) != 0) {
1920                                 index = *dpaa_seqn(op->sym->m_src) - 1;
1921                                 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1922                                         /* QM_EQCR_DCA_IDXMASK = 0x0f */
1923                                         flags[loop] = ((index & 0x0f) << 8);
1924                                         flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1925                                         DPAA_PER_LCORE_DQRR_SIZE--;
1926                                         DPAA_PER_LCORE_DQRR_HELD &=
1927                                                                 ~(1 << index);
1928                                 }
1929                         }
1930
1931                         switch (op->sess_type) {
1932                         case RTE_CRYPTO_OP_WITH_SESSION:
1933                                 ses = (dpaa_sec_session *)
1934                                         get_sym_session_private_data(
1935                                                 op->sym->session,
1936                                                 dpaa_cryptodev_driver_id);
1937                                 break;
1938 #ifdef RTE_LIB_SECURITY
1939                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1940                                 ses = (dpaa_sec_session *)
1941                                         get_sec_session_private_data(
1942                                                         op->sym->sec_session);
1943                                 break;
1944 #endif
1945                         default:
1946                                 DPAA_SEC_DP_ERR(
1947                                         "sessionless crypto op not supported");
1948                                 frames_to_send = loop;
1949                                 nb_ops = loop;
1950                                 goto send_pkts;
1951                         }
1952
1953                         if (!ses) {
1954                                 DPAA_SEC_DP_ERR("session not available");
1955                                 frames_to_send = loop;
1956                                 nb_ops = loop;
1957                                 goto send_pkts;
1958                         }
1959
1960                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1961                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1962                                         frames_to_send = loop;
1963                                         nb_ops = loop;
1964                                         goto send_pkts;
1965                                 }
1966                         } else if (unlikely(ses->qp[rte_lcore_id() %
1967                                                 MAX_DPAA_CORES] != qp)) {
1968                                 DPAA_SEC_DP_ERR("Old: sess->qp = %p,"
1969                                         " new qp = %p\n",
1970                                         ses->qp[rte_lcore_id() %
1971                                         MAX_DPAA_CORES], qp);
1972                                 frames_to_send = loop;
1973                                 nb_ops = loop;
1974                                 goto send_pkts;
1975                         }
1976
1977                         auth_hdr_len = op->sym->auth.data.length -
1978                                                 op->sym->cipher.data.length;
1979                         auth_tail_len = 0;
1980
1981                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1982                                   ((op->sym->m_dst == NULL) ||
1983                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1984                                 switch (ses->ctxt) {
1985 #ifdef RTE_LIB_SECURITY
1986                                 case DPAA_SEC_PDCP:
1987                                 case DPAA_SEC_IPSEC:
1988                                         cf = build_proto(op, ses);
1989                                         break;
1990 #endif
1991                                 case DPAA_SEC_AUTH:
1992                                         cf = build_auth_only(op, ses);
1993                                         break;
1994                                 case DPAA_SEC_CIPHER:
1995                                         cf = build_cipher_only(op, ses);
1996                                         break;
1997                                 case DPAA_SEC_AEAD:
1998                                         cf = build_cipher_auth_gcm(op, ses);
1999                                         auth_hdr_len = ses->auth_only_len;
2000                                         break;
2001                                 case DPAA_SEC_CIPHER_HASH:
2002                                         auth_hdr_len =
2003                                                 op->sym->cipher.data.offset
2004                                                 - op->sym->auth.data.offset;
2005                                         auth_tail_len =
2006                                                 op->sym->auth.data.length
2007                                                 - op->sym->cipher.data.length
2008                                                 - auth_hdr_len;
2009                                         cf = build_cipher_auth(op, ses);
2010                                         break;
2011                                 default:
2012                                         DPAA_SEC_DP_ERR("operation not supported");
2013                                         frames_to_send = loop;
2014                                         nb_ops = loop;
2015                                         goto send_pkts;
2016                                 }
2017                         } else {
2018                                 switch (ses->ctxt) {
2019 #ifdef RTE_LIB_SECURITY
2020                                 case DPAA_SEC_PDCP:
2021                                 case DPAA_SEC_IPSEC:
2022                                         cf = build_proto_sg(op, ses);
2023                                         break;
2024 #endif
2025                                 case DPAA_SEC_AUTH:
2026                                         cf = build_auth_only_sg(op, ses);
2027                                         break;
2028                                 case DPAA_SEC_CIPHER:
2029                                         cf = build_cipher_only_sg(op, ses);
2030                                         break;
2031                                 case DPAA_SEC_AEAD:
2032                                         cf = build_cipher_auth_gcm_sg(op, ses);
2033                                         auth_hdr_len = ses->auth_only_len;
2034                                         break;
2035                                 case DPAA_SEC_CIPHER_HASH:
2036                                         auth_hdr_len =
2037                                                 op->sym->cipher.data.offset
2038                                                 - op->sym->auth.data.offset;
2039                                         auth_tail_len =
2040                                                 op->sym->auth.data.length
2041                                                 - op->sym->cipher.data.length
2042                                                 - auth_hdr_len;
2043                                         cf = build_cipher_auth_sg(op, ses);
2044                                         break;
2045                                 default:
2046                                         DPAA_SEC_DP_ERR("operation not supported");
2047                                         frames_to_send = loop;
2048                                         nb_ops = loop;
2049                                         goto send_pkts;
2050                                 }
2051                         }
2052                         if (unlikely(!cf)) {
2053                                 frames_to_send = loop;
2054                                 nb_ops = loop;
2055                                 goto send_pkts;
2056                         }
2057
2058                         fd = &fds[loop];
2059                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
2060                         fd->opaque_addr = 0;
2061                         fd->cmd = 0;
2062                         qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
2063                         fd->_format1 = qm_fd_compound;
2064                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
2065
2066                         /* Auth_only_len is set to 0 in the descriptor and
2067                          * is overwritten here in fd.cmd, which updates the
2068                          * DPOVRD register.
2069                          */
2070                         if (auth_hdr_len || auth_tail_len) {
2071                                 fd->cmd = 0x80000000;
2072                                 fd->cmd |=
2073                                         ((auth_tail_len << 16) | auth_hdr_len);
2074                         }
2075
2076 #ifdef RTE_LIB_SECURITY
2077                         /* For PDCP, the per-packet HFN is stored in the
2078                          * mbuf private area, after the sym_op.
2079                          */
2080                         if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
2081                                 fd->cmd = 0x80000000 |
2082                                         *((uint32_t *)((uint8_t *)op +
2083                                         ses->pdcp.hfn_ovd_offset));
2084                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
2085                                         *((uint32_t *)((uint8_t *)op +
2086                                         ses->pdcp.hfn_ovd_offset)),
2087                                         ses->pdcp.hfn_ovd);
2088                         }
2089 #endif
2090                 }
2091 send_pkts:
2092                 loop = 0;
2093                 while (loop < frames_to_send) {
2094                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
2095                                         &flags[loop], frames_to_send - loop);
2096                 }
2097                 nb_ops -= frames_to_send;
2098                 num_tx += frames_to_send;
2099         }
2100
2101         dpaa_qp->tx_pkts += num_tx;
2102         dpaa_qp->tx_errs += ops_count - num_tx;
2103
2104         return num_tx;
2105 }
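/*
 * Illustrative usage sketch (not part of the driver): applications reach
 * dpaa_sec_enqueue_burst() through the generic burst API; 'dev_id',
 * 'qp_id' and 'ops' are hypothetical names.
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	// sent < nb_ops means the tail of ops[] was not enqueued and
 *	// must be retried or freed by the caller
 */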
2106
2107 static uint16_t
2108 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
2109                        uint16_t nb_ops)
2110 {
2111         uint16_t num_rx;
2112         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
2113
2114         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2115                 if (rte_dpaa_portal_init((void *)0)) {
2116                         DPAA_SEC_ERR("Failure in affining portal");
2117                         return 0;
2118                 }
2119         }
2120
2121         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
2122
2123         dpaa_qp->rx_pkts += num_rx;
2124         dpaa_qp->rx_errs += nb_ops - num_rx;
2125
2126         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
2127
2128         return num_rx;
2129 }
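/*
 * Illustrative usage sketch (not part of the driver): a typical poll
 * loop over the burst API; names are hypothetical.
 *
 *	uint16_t i, n;
 *
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, BURST_SZ);
 *	for (i = 0; i < n; i++)
 *		if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *			handle_error(ops[i]);
 */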
2130
2131 /** Release queue pair */
2132 static int
2133 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
2134                             uint16_t qp_id)
2135 {
2136         struct dpaa_sec_dev_private *internals;
2137         struct dpaa_sec_qp *qp = NULL;
2138
2139         PMD_INIT_FUNC_TRACE();
2140
2141         DPAA_SEC_DEBUG("dev = %p, queue = %d", dev, qp_id);
2142
2143         internals = dev->data->dev_private;
2144         if (qp_id >= internals->max_nb_queue_pairs) {
2145                 DPAA_SEC_ERR("Invalid qp_id, max supported qpid is %d",
2146                              internals->max_nb_queue_pairs);
2147                 return -EINVAL;
2148         }
2149
2150         qp = &internals->qps[qp_id];
2151         rte_mempool_free(qp->ctx_pool);
2152         qp->internals = NULL;
2153         dev->data->queue_pairs[qp_id] = NULL;
2154
2155         return 0;
2156 }
2157
2158 /** Setup a queue pair */
2159 static int
2160 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2161                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2162                 __rte_unused int socket_id)
2163 {
2164         struct dpaa_sec_dev_private *internals;
2165         struct dpaa_sec_qp *qp = NULL;
2166         char str[20];
2167
2168         DPAA_SEC_DEBUG("dev = %p, queue = %d, conf = %p", dev, qp_id, qp_conf);
2169
2170         internals = dev->data->dev_private;
2171         if (qp_id >= internals->max_nb_queue_pairs) {
2172                 DPAA_SEC_ERR("Invalid qp_id, max supported qpid is %d",
2173                              internals->max_nb_queue_pairs);
2174                 return -EINVAL;
2175         }
2176
2177         qp = &internals->qps[qp_id];
2178         qp->internals = internals;
2179         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2180                         dev->data->dev_id, qp_id);
2181         if (!qp->ctx_pool) {
2182                 qp->ctx_pool = rte_mempool_create((const char *)str,
2183                                                         CTX_POOL_NUM_BUFS,
2184                                                         CTX_POOL_BUF_SIZE,
2185                                                         CTX_POOL_CACHE_SIZE, 0,
2186                                                         NULL, NULL, NULL, NULL,
2187                                                         SOCKET_ID_ANY, 0);
2188                 if (!qp->ctx_pool) {
2189                         DPAA_SEC_ERR("%s create failed\n", str);
2190                         return -ENOMEM;
2191                 }
2192         } else
2193                 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2194                                 dev->data->dev_id, qp_id);
2195         dev->data->queue_pairs[qp_id] = qp;
2196
2197         return 0;
2198 }
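/*
 * Illustrative sketch (not part of the driver): queue pairs are set up
 * through the generic API; this PMD ignores qp_conf and socket_id, and
 * the per-qp ctx_pool is created on first setup. Names are hypothetical.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf, 0);
 */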
2199
2200 /** Returns the size of session structure */
2201 static unsigned int
2202 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2203 {
2204         PMD_INIT_FUNC_TRACE();
2205
2206         return sizeof(dpaa_sec_session);
2207 }
2208
2209 static int
2210 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2211                      struct rte_crypto_sym_xform *xform,
2212                      dpaa_sec_session *session)
2213 {
2214         session->ctxt = DPAA_SEC_CIPHER;
2215         session->cipher_alg = xform->cipher.algo;
2216         session->iv.length = xform->cipher.iv.length;
2217         session->iv.offset = xform->cipher.iv.offset;
2218         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2219                                                RTE_CACHE_LINE_SIZE);
2220         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2221                 DPAA_SEC_ERR("No Memory for cipher key");
2222                 return -ENOMEM;
2223         }
2224         session->cipher_key.length = xform->cipher.key.length;
2225
2226         memcpy(session->cipher_key.data, xform->cipher.key.data,
2227                xform->cipher.key.length);
2228         switch (xform->cipher.algo) {
2229         case RTE_CRYPTO_CIPHER_AES_CBC:
2230                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2231                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2232                 break;
2233         case RTE_CRYPTO_CIPHER_DES_CBC:
2234                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2235                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2236                 break;
2237         case RTE_CRYPTO_CIPHER_3DES_CBC:
2238                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2239                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2240                 break;
2241         case RTE_CRYPTO_CIPHER_AES_CTR:
2242                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2243                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2244                 break;
2245         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2246                 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2247                 break;
2248         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2249                 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2250                 break;
2251         default:
2252                 DPAA_SEC_ERR("Crypto: Unsupported Cipher specified %u",
2253                               xform->cipher.algo);
2254                 return -ENOTSUP;
2255         }
2256         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2257                         DIR_ENC : DIR_DEC;
2258
2259         return 0;
2260 }
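/*
 * Illustrative sketch (not part of the driver): a cipher-only transform
 * as consumed by dpaa_sec_cipher_init() above; 'key' and 'IV_OFFSET'
 * are hypothetical.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */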
2261
2262 static int
2263 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2264                    struct rte_crypto_sym_xform *xform,
2265                    dpaa_sec_session *session)
2266 {
2267         session->ctxt = DPAA_SEC_AUTH;
2268         session->auth_alg = xform->auth.algo;
2269         session->auth_key.length = xform->auth.key.length;
2270         if (xform->auth.key.length) {
2271                 session->auth_key.data =
2272                                 rte_zmalloc(NULL, xform->auth.key.length,
2273                                              RTE_CACHE_LINE_SIZE);
2274                 if (session->auth_key.data == NULL) {
2275                         DPAA_SEC_ERR("No Memory for auth key");
2276                         return -ENOMEM;
2277                 }
2278                 memcpy(session->auth_key.data, xform->auth.key.data,
2279                                 xform->auth.key.length);
2280
2281         }
2282         session->digest_length = xform->auth.digest_length;
2283         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2284                 session->iv.offset = xform->auth.iv.offset;
2285                 session->iv.length = xform->auth.iv.length;
2286         }
2287
2288         switch (xform->auth.algo) {
2289         case RTE_CRYPTO_AUTH_SHA1:
2290                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2291                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2292                 break;
2293         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2294                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2295                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2296                 break;
2297         case RTE_CRYPTO_AUTH_MD5:
2298                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2299                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2300                 break;
2301         case RTE_CRYPTO_AUTH_MD5_HMAC:
2302                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2303                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2304                 break;
2305         case RTE_CRYPTO_AUTH_SHA224:
2306                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2307                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2308                 break;
2309         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2310                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2311                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2312                 break;
2313         case RTE_CRYPTO_AUTH_SHA256:
2314                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2315                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2316                 break;
2317         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2318                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2319                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2320                 break;
2321         case RTE_CRYPTO_AUTH_SHA384:
2322                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2323                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2324                 break;
2325         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2326                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2327                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2328                 break;
2329         case RTE_CRYPTO_AUTH_SHA512:
2330                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2331                 session->auth_key.algmode = OP_ALG_AAI_HASH;
2332                 break;
2333         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2334                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2335                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2336                 break;
2337         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2338                 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2339                 session->auth_key.algmode = OP_ALG_AAI_F9;
2340                 break;
2341         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2342                 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2343                 session->auth_key.algmode = OP_ALG_AAI_F9;
2344                 break;
2345         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2346                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2347                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2348                 break;
2349         case RTE_CRYPTO_AUTH_AES_CMAC:
2350                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2351                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2352                 break;
2353         default:
2354                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2355                               xform->auth.algo);
2356                 return -ENOTSUP;
2357         }
2358
2359         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2360                         DIR_ENC : DIR_DEC;
2361
2362         return 0;
2363 }
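/*
 * Illustrative sketch (not part of the driver): an auth-only transform
 * as consumed by dpaa_sec_auth_init() above; 'key' is hypothetical.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = key, .length = 20 },
 *			.digest_length = 20,
 *		},
 *	};
 */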
2364
2365 static int
2366 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2367                    struct rte_crypto_sym_xform *xform,
2368                    dpaa_sec_session *session)
2369 {
2371         struct rte_crypto_cipher_xform *cipher_xform;
2372         struct rte_crypto_auth_xform *auth_xform;
2373
2374         session->ctxt = DPAA_SEC_CIPHER_HASH;
2375         if (session->auth_cipher_text) {
2376                 cipher_xform = &xform->cipher;
2377                 auth_xform = &xform->next->auth;
2378         } else {
2379                 cipher_xform = &xform->next->cipher;
2380                 auth_xform = &xform->auth;
2381         }
2382
2383         /* Set IV parameters */
2384         session->iv.offset = cipher_xform->iv.offset;
2385         session->iv.length = cipher_xform->iv.length;
2386
2387         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2388                                                RTE_CACHE_LINE_SIZE);
2389         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2390                 DPAA_SEC_ERR("No Memory for cipher key");
2391                 return -ENOMEM;
2392         }
2393         session->cipher_key.length = cipher_xform->key.length;
2394         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2395                                              RTE_CACHE_LINE_SIZE);
2396         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2397                 DPAA_SEC_ERR("No Memory for auth key");
2398                 return -ENOMEM;
2399         }
2400         session->auth_key.length = auth_xform->key.length;
2401         memcpy(session->cipher_key.data, cipher_xform->key.data,
2402                cipher_xform->key.length);
2403         memcpy(session->auth_key.data, auth_xform->key.data,
2404                auth_xform->key.length);
2405
2406         session->digest_length = auth_xform->digest_length;
2407         session->auth_alg = auth_xform->algo;
2408
2409         switch (auth_xform->algo) {
2410         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2411                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2412                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2413                 break;
2414         case RTE_CRYPTO_AUTH_MD5_HMAC:
2415                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2416                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2417                 break;
2418         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2419                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2420                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2421                 break;
2422         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2423                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2424                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2425                 break;
2426         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2427                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2428                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2429                 break;
2430         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2431                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2432                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2433                 break;
2434         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2435                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2436                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2437                 break;
2438         case RTE_CRYPTO_AUTH_AES_CMAC:
2439                 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2440                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2441                 break;
2442         default:
2443                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2444                               auth_xform->algo);
2445                 return -ENOTSUP;
2446         }
2447
2448         session->cipher_alg = cipher_xform->algo;
2449
2450         switch (cipher_xform->algo) {
2451         case RTE_CRYPTO_CIPHER_AES_CBC:
2452                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2453                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2454                 break;
2455         case RTE_CRYPTO_CIPHER_DES_CBC:
2456                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2457                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2458                 break;
2459         case RTE_CRYPTO_CIPHER_3DES_CBC:
2460                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2461                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2462                 break;
2463         case RTE_CRYPTO_CIPHER_AES_CTR:
2464                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2465                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2466                 break;
2467         default:
2468                 DPAA_SEC_ERR("Crypto: Unsupported Cipher specified %u",
2469                               cipher_xform->algo);
2470                 return -ENOTSUP;
2471         }
2472         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2473                                 DIR_ENC : DIR_DEC;
2474         return 0;
2475 }
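/*
 * Illustrative sketch (not part of the driver): chained transforms for
 * encrypt-then-MAC as handled by dpaa_sec_chain_init() above; the cipher
 * xform is listed first and points at the auth xform. Names are
 * hypothetical.
 *
 *	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
 *	cipher_xform.next = &auth_xform;
 *	auth_xform.next = NULL;
 *	// pass &cipher_xform to session creation
 */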
2476
2477 static int
2478 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2479                    struct rte_crypto_sym_xform *xform,
2480                    dpaa_sec_session *session)
2481 {
2482         session->aead_alg = xform->aead.algo;
2483         session->ctxt = DPAA_SEC_AEAD;
2484         session->iv.length = xform->aead.iv.length;
2485         session->iv.offset = xform->aead.iv.offset;
2486         session->auth_only_len = xform->aead.aad_length;
2487         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2488                                              RTE_CACHE_LINE_SIZE);
2489         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2490                 DPAA_SEC_ERR("No Memory for aead key");
2491                 return -ENOMEM;
2492         }
2493         session->aead_key.length = xform->aead.key.length;
2494         session->digest_length = xform->aead.digest_length;
2495
2496         memcpy(session->aead_key.data, xform->aead.key.data,
2497                xform->aead.key.length);
2498
2499         switch (session->aead_alg) {
2500         case RTE_CRYPTO_AEAD_AES_GCM:
2501                 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2502                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2503                 break;
2504         default:
2505                 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2506                 return -ENOTSUP;
2507         }
2508
2509         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2510                         DIR_ENC : DIR_DEC;
2511
2512         return 0;
2513 }
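/*
 * Illustrative sketch (not part of the driver): an AES-GCM transform as
 * consumed by dpaa_sec_aead_init() above; 'key' and 'IV_OFFSET' are
 * hypothetical.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */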
2514
2515 static struct qman_fq *
2516 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2517 {
2518         unsigned int i;
2519
2520         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2521                 if (qi->inq_attach[i] == 0) {
2522                         qi->inq_attach[i] = 1;
2523                         return &qi->inq[i];
2524                 }
2525         }
2526         DPAA_SEC_WARN("All sessions in use, max %u", qi->max_nb_sessions);
2527
2528         return NULL;
2529 }
2530
2531 static int
2532 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2533 {
2534         unsigned int i;
2535
2536         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2537                 if (&qi->inq[i] == fq) {
2538                         if (qman_retire_fq(fq, NULL) != 0)
2539                         DPAA_SEC_DEBUG("Queue could not be retired\n");
2540                         qman_oos_fq(fq);
2541                         qi->inq_attach[i] = 0;
2542                         return 0;
2543                 }
2544         }
2545         return -1;
2546 }
2547
2548 int
2549 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2550 {
2551         int ret;
2552
2553         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2554         ret = dpaa_sec_prep_cdb(sess);
2555         if (ret) {
2556                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2557                 return ret;
2558         }
2559         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2560                 ret = rte_dpaa_portal_init((void *)0);
2561                 if (ret) {
2562                         DPAA_SEC_ERR("Failure in affining portal");
2563                         return ret;
2564                 }
2565         }
2566         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2567                                rte_dpaa_mem_vtop(&sess->cdb),
2568                                qman_fq_fqid(&qp->outq));
2569         if (ret)
2570                 DPAA_SEC_ERR("Unable to init sec queue");
2571
2572         return ret;
2573 }
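/*
 * dpaa_sec_attach_sess_q() is invoked lazily from the enqueue path the
 * first time a session is used on a given lcore's queue pair: it binds
 * the session to the qp, prepares the shared descriptor (CDB) and
 * initializes the session's per-core input queue to deliver results to
 * the qp's output queue.
 */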
2574
2575 static inline void
2576 free_session_data(dpaa_sec_session *s)
2577 {
2578         if (is_aead(s))
2579                 rte_free(s->aead_key.data);
2580         else {
2581                 rte_free(s->auth_key.data);
2582                 rte_free(s->cipher_key.data);
2583         }
2584         memset(s, 0, sizeof(dpaa_sec_session));
2585 }
2586
2587 static int
2588 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2589                             struct rte_crypto_sym_xform *xform, void *sess)
2590 {
2591         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2592         dpaa_sec_session *session = sess;
2593         uint32_t i;
2594         int ret;
2595
2596         PMD_INIT_FUNC_TRACE();
2597
2598         if (unlikely(sess == NULL)) {
2599                 DPAA_SEC_ERR("invalid session struct");
2600                 return -EINVAL;
2601         }
2602         memset(session, 0, sizeof(dpaa_sec_session));
2603
2604         /* Default IV length = 0 */
2605         session->iv.length = 0;
2606
2607         /* Cipher Only */
2608         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2609                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2610                 ret = dpaa_sec_cipher_init(dev, xform, session);
2611
2612         /* Authentication Only */
2613         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2614                    xform->next == NULL) {
2615                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2616                 session->ctxt = DPAA_SEC_AUTH;
2617                 ret = dpaa_sec_auth_init(dev, xform, session);
2618
2619         /* Cipher then Authenticate */
2620         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2621                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2622                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2623                         session->auth_cipher_text = 1;
2624                         if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2625                                 ret = dpaa_sec_auth_init(dev, xform, session);
2626                         else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2627                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2628                         else
2629                                 ret = dpaa_sec_chain_init(dev, xform, session);
2630                 } else {
2631                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2632                         return -ENOTSUP;
2633                 }
2634         /* Authenticate then Cipher */
2635         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2636                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2637                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2638                         session->auth_cipher_text = 0;
2639                         if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2640                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2641                         else if (xform->next->cipher.algo
2642                                         == RTE_CRYPTO_CIPHER_NULL)
2643                                 ret = dpaa_sec_auth_init(dev, xform, session);
2644                         else
2645                                 ret = dpaa_sec_chain_init(dev, xform, session);
2646                 } else {
2647                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2648                         return -ENOTSUP;
2649                 }
2650
2651         /* AEAD operation for AES-GCM kind of Algorithms */
2652         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2653                    xform->next == NULL) {
2654                 ret = dpaa_sec_aead_init(dev, xform, session);
2655
2656         } else {
2657                 DPAA_SEC_ERR("Invalid crypto type");
2658                 return -EINVAL;
2659         }
2660         if (ret) {
2661                 DPAA_SEC_ERR("unable to init session");
2662                 goto err1;
2663         }
2664
2665         rte_spinlock_lock(&internals->lock);
2666         for (i = 0; i < MAX_DPAA_CORES; i++) {
2667                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2668                 if (session->inq[i] == NULL) {
2669                         DPAA_SEC_ERR("unable to attach sec queue");
2670                         rte_spinlock_unlock(&internals->lock);
2671                         ret = -EBUSY;
2672                         goto err1;
2673                 }
2674         }
2675         rte_spinlock_unlock(&internals->lock);
2676
2677         return 0;
2678
2679 err1:
2680         free_session_data(session);
2681         return ret;
2682 }
2683
2684 static int
2685 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2686                 struct rte_crypto_sym_xform *xform,
2687                 struct rte_cryptodev_sym_session *sess,
2688                 struct rte_mempool *mempool)
2689 {
2690         void *sess_private_data;
2691         int ret;
2692
2693         PMD_INIT_FUNC_TRACE();
2694
2695         if (rte_mempool_get(mempool, &sess_private_data)) {
2696                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2697                 return -ENOMEM;
2698         }
2699
2700         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2701         if (ret != 0) {
2702                 DPAA_SEC_ERR("failed to configure session parameters");
2703
2704                 /* Return session to mempool */
2705                 rte_mempool_put(mempool, sess_private_data);
2706                 return ret;
2707         }
2708
2709         set_sym_session_private_data(sess, dev->driver_id,
2710                         sess_private_data);
2711
2712
2713         return 0;
2714 }
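/*
 * Illustrative sketch (not part of the driver): with the pre-22.11
 * cryptodev API used here, an application creates and initializes a
 * symmetric session as below; 'sess_mp', 'sess_priv_mp' and 'xform' are
 * hypothetical names.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *					   sess_priv_mp) != 0)
 *		handle_error();
 */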
2715
2716 static inline void
2717 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2718 {
2719         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2720         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2721         uint8_t i;
2722
2723         for (i = 0; i < MAX_DPAA_CORES; i++) {
2724                 if (s->inq[i])
2725                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2726                 s->inq[i] = NULL;
2727                 s->qp[i] = NULL;
2728         }
2729         free_session_data(s);
2730         rte_mempool_put(sess_mp, (void *)s);
2731 }
2732
2733 /** Clear the session memory so it doesn't leave key material behind */
2734 static void
2735 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2736                 struct rte_cryptodev_sym_session *sess)
2737 {
2738         PMD_INIT_FUNC_TRACE();
2739         uint8_t index = dev->driver_id;
2740         void *sess_priv = get_sym_session_private_data(sess, index);
2741         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2742
2743         if (sess_priv) {
2744                 free_session_memory(dev, s);
2745                 set_sym_session_private_data(sess, index, NULL);
2746         }
2747 }
2748
2749 #ifdef RTE_LIB_SECURITY
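     /*
      * Map an AEAD transform onto the SEC IPsec protocol descriptor.
      * Only AES-GCM is supported; the CAAM opcode is selected by the
      * requested ICV length (8/12/16 bytes).
      */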
2750 static int
2751 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2752                         struct rte_security_ipsec_xform *ipsec_xform,
2753                         dpaa_sec_session *session)
2754 {
2755         PMD_INIT_FUNC_TRACE();
2756
2757         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2758                                                RTE_CACHE_LINE_SIZE);
2759         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2760                 DPAA_SEC_ERR("No Memory for aead key");
2761                 return -ENOMEM;
2762         }
2763         memcpy(session->aead_key.data, aead_xform->key.data,
2764                aead_xform->key.length);
2765
2766         session->digest_length = aead_xform->digest_length;
2767         session->aead_key.length = aead_xform->key.length;
2768
2769         switch (aead_xform->algo) {
2770         case RTE_CRYPTO_AEAD_AES_GCM:
2771                 switch (session->digest_length) {
2772                 case 8:
2773                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2774                         break;
2775                 case 12:
2776                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2777                         break;
2778                 case 16:
2779                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2780                         break;
2781                 default:
2782                         DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2783                                      session->digest_length);
2784                         return -EINVAL;
2785                 }
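                     /*
                      * Per RFC 4106 the 4-byte salt from the SA forms the fixed
                      * part of the GCM nonce; keep it in the encap/decap PDB
                      * instead of carrying it per packet.
                      */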
2786                 if (session->dir == DIR_ENC) {
2787                         memcpy(session->encap_pdb.gcm.salt,
2788                                 (uint8_t *)&(ipsec_xform->salt), 4);
2789                 } else {
2790                         memcpy(session->decap_pdb.gcm.salt,
2791                                 (uint8_t *)&(ipsec_xform->salt), 4);
2792                 }
2793                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2794                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2795                 break;
2796         default:
2797                 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2798                               aead_xform->algo);
2799                 return -ENOTSUP;
2800         }
2801         return 0;
2802 }
2803
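     /*
      * Copy the cipher/auth keys out of the transforms and translate the
      * algorithm enums into CAAM IPsec opcodes. A missing transform
      * degenerates to the corresponding NULL algorithm.
      */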
2804 static int
2805 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2806         struct rte_crypto_auth_xform *auth_xform,
2807         struct rte_security_ipsec_xform *ipsec_xform,
2808         dpaa_sec_session *session)
2809 {
2810         if (cipher_xform) {
2811                 session->cipher_key.data = rte_zmalloc(NULL,
2812                                                        cipher_xform->key.length,
2813                                                        RTE_CACHE_LINE_SIZE);
2814                 if (session->cipher_key.data == NULL &&
2815                                 cipher_xform->key.length > 0) {
2816                         DPAA_SEC_ERR("No Memory for cipher key");
2817                         return -ENOMEM;
2818                 }
2819
2820                 session->cipher_key.length = cipher_xform->key.length;
2821                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2822                                 cipher_xform->key.length);
2823                 session->cipher_alg = cipher_xform->algo;
2824         } else {
2825                 session->cipher_key.data = NULL;
2826                 session->cipher_key.length = 0;
2827                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2828         }
2829
2830         if (auth_xform) {
2831                 session->auth_key.data = rte_zmalloc(NULL,
2832                                                 auth_xform->key.length,
2833                                                 RTE_CACHE_LINE_SIZE);
2834                 if (session->auth_key.data == NULL &&
2835                                 auth_xform->key.length > 0) {
2836                         DPAA_SEC_ERR("No Memory for auth key");
2837                         return -ENOMEM;
2838                 }
2839                 session->auth_key.length = auth_xform->key.length;
2840                 memcpy(session->auth_key.data, auth_xform->key.data,
2841                                 auth_xform->key.length);
2842                 session->auth_alg = auth_xform->algo;
2843                 session->digest_length = auth_xform->digest_length;
2844         } else {
2845                 session->auth_key.data = NULL;
2846                 session->auth_key.length = 0;
2847                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2848         }
2849
2850         switch (session->auth_alg) {
2851         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2852                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2853                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2854                 break;
2855         case RTE_CRYPTO_AUTH_MD5_HMAC:
2856                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2857                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2858                 break;
2859         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2860                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2861                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2862                 if (session->digest_length != 16)
2863                         DPAA_SEC_WARN(
2864                         "Using sha256-hmac with a truncated length is "
2865                         "non-standard; it will not work with lookaside proto");
2866                 break;
2867         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2868                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2869                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2870                 break;
2871         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2872                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2873                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2874                 break;
2875         case RTE_CRYPTO_AUTH_AES_CMAC:
2876                 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2877                 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2878                 break;
2879         case RTE_CRYPTO_AUTH_NULL:
2880                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2881                 break;
2882         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2883                 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2884                 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2885                 break;
2886         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2887         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2888         case RTE_CRYPTO_AUTH_SHA1:
2889         case RTE_CRYPTO_AUTH_SHA256:
2890         case RTE_CRYPTO_AUTH_SHA512:
2891         case RTE_CRYPTO_AUTH_SHA224:
2892         case RTE_CRYPTO_AUTH_SHA384:
2893         case RTE_CRYPTO_AUTH_MD5:
2894         case RTE_CRYPTO_AUTH_AES_GMAC:
2895         case RTE_CRYPTO_AUTH_KASUMI_F9:
2896         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2897         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2898                 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2899                               session->auth_alg);
2900                 return -ENOTSUP;
2901         default:
2902                 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2903                               session->auth_alg);
2904                 return -ENOTSUP;
2905         }
2906
2907         switch (session->cipher_alg) {
2908         case RTE_CRYPTO_CIPHER_AES_CBC:
2909                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2910                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2911                 break;
2912         case RTE_CRYPTO_CIPHER_DES_CBC:
2913                 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2914                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2915                 break;
2916         case RTE_CRYPTO_CIPHER_3DES_CBC:
2917                 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2918                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2919                 break;
2920         case RTE_CRYPTO_CIPHER_AES_CTR:
2921                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2922                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
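                     /*
                      * RFC 3686 counter mode: the block counter starts at 1 and
                      * the 4-byte SA salt is used as the nonce in the PDB.
                      */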
2923                 if (session->dir == DIR_ENC) {
2924                         session->encap_pdb.ctr.ctr_initial = 0x00000001;
2925                         session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2926                 } else {
2927                         session->decap_pdb.ctr.ctr_initial = 0x00000001;
2928                         session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2929                 }
2930                 break;
2931         case RTE_CRYPTO_CIPHER_NULL:
2932                 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2933                 break;
2934         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2935         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2936         case RTE_CRYPTO_CIPHER_3DES_ECB:
2937         case RTE_CRYPTO_CIPHER_AES_ECB:
2938         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2939                 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2940                               session->cipher_alg);
2941                 return -ENOTSUP;
2942         default:
2943                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2944                               session->cipher_alg);
2945                 return -ENOTSUP;
2946         }
2947
2948         return 0;
2949 }
2950
2951 static int
2952 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2953                            struct rte_security_session_conf *conf,
2954                            void *sess)
2955 {
2956         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2957         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2958         struct rte_crypto_auth_xform *auth_xform = NULL;
2959         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2960         struct rte_crypto_aead_xform *aead_xform = NULL;
2961         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2962         uint32_t i;
2963         int ret;
2964
2965         PMD_INIT_FUNC_TRACE();
2966
2967         memset(session, 0, sizeof(dpaa_sec_session));
2968         session->proto_alg = conf->protocol;
2969         session->ctxt = DPAA_SEC_IPSEC;
2970
2971         if (ipsec_xform->life.bytes_hard_limit != 0 ||
2972             ipsec_xform->life.bytes_soft_limit != 0 ||
2973             ipsec_xform->life.packets_hard_limit != 0 ||
2974             ipsec_xform->life.packets_soft_limit != 0)
2975                 return -ENOTSUP;
2976
2977         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2978                 session->dir = DIR_ENC;
2979         else
2980                 session->dir = DIR_DEC;
2981
2982         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2983                 cipher_xform = &conf->crypto_xform->cipher;
2984                 if (conf->crypto_xform->next)
2985                         auth_xform = &conf->crypto_xform->next->auth;
2986                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2987                                         ipsec_xform, session);
2988         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2989                 auth_xform = &conf->crypto_xform->auth;
2990                 if (conf->crypto_xform->next)
2991                         cipher_xform = &conf->crypto_xform->next->cipher;
2992                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2993                                         ipsec_xform, session);
2994         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2995                 aead_xform = &conf->crypto_xform->aead;
2996                 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2997                                         ipsec_xform, session);
2998         } else {
2999                 DPAA_SEC_ERR("XFORM not specified");
3000                 ret = -EINVAL;
3001                 goto out;
3002         }
3003         if (ret) {
3004                 DPAA_SEC_ERR("Failed to process xform");
3005                 goto out;
3006         }
3007
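             /*
              * For egress tunnel mode, prebuild the outer IPv4/IPv6 header;
              * it is stored inline in the encap PDB (PDBOPTS_ESP_OIHI_PDB_INL
              * below) and prepended to every packet by the hardware.
              */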
3008         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3009                 if (ipsec_xform->tunnel.type ==
3010                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3011                         session->ip4_hdr.ip_v = IPVERSION;
3012                         session->ip4_hdr.ip_hl = 5;
3013                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
3014                                                 sizeof(session->ip4_hdr));
3015                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3016                         session->ip4_hdr.ip_id = 0;
3017                         session->ip4_hdr.ip_off = 0;
3018                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3019                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
3020                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3021                                         IPPROTO_ESP : IPPROTO_AH;
3022                         session->ip4_hdr.ip_sum = 0;
3023                         session->ip4_hdr.ip_src =
3024                                         ipsec_xform->tunnel.ipv4.src_ip;
3025                         session->ip4_hdr.ip_dst =
3026                                         ipsec_xform->tunnel.ipv4.dst_ip;
3027                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
3028                                                 (void *)&session->ip4_hdr,
3029                                                 sizeof(struct ip));
3030                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
3031                 } else if (ipsec_xform->tunnel.type ==
3032                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3033                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3034                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
3035                                 ((ipsec_xform->tunnel.ipv6.dscp <<
3036                                         RTE_IPV6_HDR_TC_SHIFT) &
3037                                         RTE_IPV6_HDR_TC_MASK) |
3038                                 ((ipsec_xform->tunnel.ipv6.flabel <<
3039                                         RTE_IPV6_HDR_FL_SHIFT) &
3040                                         RTE_IPV6_HDR_FL_MASK));
3041                         /* Payload length will be updated by HW */
3042                         session->ip6_hdr.payload_len = 0;
3043                         session->ip6_hdr.hop_limits =
3044                                         ipsec_xform->tunnel.ipv6.hlimit;
3045                         session->ip6_hdr.proto = (ipsec_xform->proto ==
3046                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3047                                         IPPROTO_ESP : IPPROTO_AH;
3048                         memcpy(&session->ip6_hdr.src_addr,
3049                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
3050                         memcpy(&session->ip6_hdr.dst_addr,
3051                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
3052                         session->encap_pdb.ip_hdr_len =
3053                                                 sizeof(struct rte_ipv6_hdr);
3054                 }
3055
3056                 session->encap_pdb.options =
3057                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3058                         PDBOPTS_ESP_OIHI_PDB_INL |
3059                         PDBOPTS_ESP_IVSRC |
3060                         PDBHMO_ESP_SNR;
3061                 if (ipsec_xform->options.dec_ttl)
3062                         session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3063                 if (ipsec_xform->options.esn)
3064                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
3065                 session->encap_pdb.spi = ipsec_xform->spi;
3066
3067         } else if (ipsec_xform->direction ==
3068                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3069                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
3070                         session->decap_pdb.options = sizeof(struct ip) << 16;
3071                 else
3072                         session->decap_pdb.options =
3073                                         sizeof(struct rte_ipv6_hdr) << 16;
3074                 if (ipsec_xform->options.esn)
3075                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
3076                 if (ipsec_xform->replay_win_sz) {
3077                         uint32_t win_sz;
3078                         win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3079
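                             /*
                              * The hardware supports 32-, 64- and 128-entry
                              * anti-replay windows: round the requested size up
                              * to a power of two and pick the smallest window
                              * that covers it (capped at 128).
                              */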
3080                         switch (win_sz) {
3081                         case 1:
3082                         case 2:
3083                         case 4:
3084                         case 8:
3085                         case 16:
3086                         case 32:
3087                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
3088                                 break;
3089                         case 64:
3090                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
3091                                 break;
3092                         default:
3093                                 session->decap_pdb.options |=
3094                                                         PDBOPTS_ESP_ARS128;
3095                         }
3096                 }
3097         } else {
                     ret = -EINVAL;
3098                 goto out;
             }
3099         rte_spinlock_lock(&internals->lock);
3100         for (i = 0; i < MAX_DPAA_CORES; i++) {
3101                 session->inq[i] = dpaa_sec_attach_rxq(internals);
3102                 if (session->inq[i] == NULL) {
3103                         DPAA_SEC_ERR("unable to attach sec queue");
3104                         rte_spinlock_unlock(&internals->lock);
3105                         ret = -EBUSY;
                             goto out;
3106                 }
3107         }
3108         rte_spinlock_unlock(&internals->lock);
3109
3110         return 0;
3111 out:
3112         free_session_data(session);
3113         return ret;
3114 }
3115
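     /*
      * Build a PDCP session: translate the cipher/auth transforms into
      * PDCP algorithm types and capture SN size, bearer, direction and
      * HFN settings from the PDCP xform.
      */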
3116 static int
3117 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
3118                           struct rte_security_session_conf *conf,
3119                           void *sess)
3120 {
3121         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3122         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3123         struct rte_crypto_auth_xform *auth_xform = NULL;
3124         struct rte_crypto_cipher_xform *cipher_xform = NULL;
3125         dpaa_sec_session *session = (dpaa_sec_session *)sess;
3126         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
3127         uint32_t i;
3128         int ret;
3129
3130         PMD_INIT_FUNC_TRACE();
3131
3132         memset(session, 0, sizeof(dpaa_sec_session));
3133
3134         /* find xform types */
3135         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3136                 cipher_xform = &xform->cipher;
3137                 if (xform->next != NULL &&
3138                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
3139                         auth_xform = &xform->next->auth;
3140         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3141                 auth_xform = &xform->auth;
3142                 if (xform->next != NULL &&
3143                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
3144                         cipher_xform = &xform->next->cipher;
3145         } else {
3146                 DPAA_SEC_ERR("Invalid crypto type");
3147                 return -EINVAL;
3148         }
3149
3150         session->proto_alg = conf->protocol;
3151         session->ctxt = DPAA_SEC_PDCP;
3152
3153         if (cipher_xform) {
3154                 switch (cipher_xform->algo) {
3155                 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3156                         session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3157                         break;
3158                 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3159                         session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3160                         break;
3161                 case RTE_CRYPTO_CIPHER_AES_CTR:
3162                         session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3163                         break;
3164                 case RTE_CRYPTO_CIPHER_NULL:
3165                         session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3166                         break;
3167                 default:
3168                         DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3169                                       cipher_xform->algo);
3170                         return -EINVAL;
3171                 }
3172
3173                 session->cipher_key.data = rte_zmalloc(NULL,
3174                                                cipher_xform->key.length,
3175                                                RTE_CACHE_LINE_SIZE);
3176                 if (session->cipher_key.data == NULL &&
3177                                 cipher_xform->key.length > 0) {
3178                         DPAA_SEC_ERR("No Memory for cipher key");
3179                         return -ENOMEM;
3180                 }
3181                 session->cipher_key.length = cipher_xform->key.length;
3182                 memcpy(session->cipher_key.data, cipher_xform->key.data,
3183                         cipher_xform->key.length);
3184                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3185                                         DIR_ENC : DIR_DEC;
3186                 session->cipher_alg = cipher_xform->algo;
3187         } else {
3188                 session->cipher_key.data = NULL;
3189                 session->cipher_key.length = 0;
3190                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3191                 session->dir = DIR_ENC;
3192         }
3193
3194         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3195                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3196                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3197                         DPAA_SEC_ERR(
3198                                 "PDCP Seq Num size should be 5/12 bits for cmode");
3199                         ret = -EINVAL;
3200                         goto out;
3201                 }
3202         }
3203
3204         if (auth_xform) {
3205                 switch (auth_xform->algo) {
3206                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3207                         session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3208                         break;
3209                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3210                         session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3211                         break;
3212                 case RTE_CRYPTO_AUTH_AES_CMAC:
3213                         session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3214                         break;
3215                 case RTE_CRYPTO_AUTH_NULL:
3216                         session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3217                         break;
3218                 default:
3219                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3220                                       auth_xform->algo);
3221                         rte_free(session->cipher_key.data);
3222                         return -EINVAL;
3223                 }
3224                 session->auth_key.data = rte_zmalloc(NULL,
3225                                                      auth_xform->key.length,
3226                                                      RTE_CACHE_LINE_SIZE);
3227                 if (!session->auth_key.data &&
3228                     auth_xform->key.length > 0) {
3229                         DPAA_SEC_ERR("No Memory for auth key");
3230                         rte_free(session->cipher_key.data);
3231                         return -ENOMEM;
3232                 }
3233                 session->auth_key.length = auth_xform->key.length;
3234                 memcpy(session->auth_key.data, auth_xform->key.data,
3235                        auth_xform->key.length);
3236                 session->auth_alg = auth_xform->algo;
3237         } else {
3238                 session->auth_key.data = NULL;
3239                 session->auth_key.length = 0;
3240                 session->auth_alg = 0;
3241         }
3242         session->pdcp.domain = pdcp_xform->domain;
3243         session->pdcp.bearer = pdcp_xform->bearer;
3244         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3245         session->pdcp.sn_size = pdcp_xform->sn_size;
3246         session->pdcp.hfn = pdcp_xform->hfn;
3247         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3248         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3249         session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
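             /*
              * With HFN override enabled, the per-packet HFN is expected in
              * the crypto op at the cipher IV offset, hence reusing
              * cipher_xform->iv.offset here (assumption based on how the
              * enqueue path consumes hfn_ovd).
              */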
3250         if (cipher_xform)
3251                 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3252
3253         rte_spinlock_lock(&dev_priv->lock);
3254         for (i = 0; i < MAX_DPAA_CORES; i++) {
3255                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3256                 if (session->inq[i] == NULL) {
3257                         DPAA_SEC_ERR("unable to attach sec queue");
3258                         rte_spinlock_unlock(&dev_priv->lock);
3259                         ret = -EBUSY;
3260                         goto out;
3261                 }
3262         }
3263         rte_spinlock_unlock(&dev_priv->lock);
3264         return 0;
3265 out:
3266         rte_free(session->auth_key.data);
3267         rte_free(session->cipher_key.data);
3268         memset(session, 0, sizeof(dpaa_sec_session));
3269         return ret;
3270 }
3271
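     /*
      * rte_security session create entry point: dispatch to the IPsec or
      * PDCP session builder according to conf->protocol.
      */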
3272 static int
3273 dpaa_sec_security_session_create(void *dev,
3274                                  struct rte_security_session_conf *conf,
3275                                  struct rte_security_session *sess,
3276                                  struct rte_mempool *mempool)
3277 {
3278         void *sess_private_data;
3279         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3280         int ret;
3281
3282         if (rte_mempool_get(mempool, &sess_private_data)) {
3283                 DPAA_SEC_ERR("Couldn't get object from session mempool");
3284                 return -ENOMEM;
3285         }
3286
3287         switch (conf->protocol) {
3288         case RTE_SECURITY_PROTOCOL_IPSEC:
3289                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3290                                 sess_private_data);
3291                 break;
3292         case RTE_SECURITY_PROTOCOL_PDCP:
3293                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3294                                 sess_private_data);
3295                 break;
3296         case RTE_SECURITY_PROTOCOL_MACSEC:
                     /* Return session to mempool */
                     rte_mempool_put(mempool, sess_private_data);
3297                 return -ENOTSUP;
3298         default:
                     rte_mempool_put(mempool, sess_private_data);
3299                 return -EINVAL;
3300         }
3301         if (ret != 0) {
3302                 DPAA_SEC_ERR("failed to configure session parameters");
3303                 /* Return session to mempool */
3304                 rte_mempool_put(mempool, sess_private_data);
3305                 return ret;
3306         }
3307
3308         set_sec_session_private_data(sess, sess_private_data);
3309
3310         return ret;
3311 }
3312
3313 /** Clear the session memory so it doesn't leave key material behind */
3314 static int
3315 dpaa_sec_security_session_destroy(void *dev,
3316                 struct rte_security_session *sess)
3317 {
3318         PMD_INIT_FUNC_TRACE();
3319         void *sess_priv = get_sec_session_private_data(sess);
3320         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3321
3322         if (sess_priv) {
3323                 free_session_memory((struct rte_cryptodev *)dev, s);
3324                 set_sec_session_private_data(sess, NULL);
3325         }
3326         return 0;
3327 }
3328 #endif
3329 static int
3330 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3331                        struct rte_cryptodev_config *config __rte_unused)
3332 {
3333         PMD_INIT_FUNC_TRACE();
3334
3335         return 0;
3336 }
3337
3338 static int
3339 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3340 {
3341         PMD_INIT_FUNC_TRACE();
3342         return 0;
3343 }
3344
3345 static void
3346 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3347 {
3348         PMD_INIT_FUNC_TRACE();
3349 }
3350
3351 static int
3352 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3353 {
3354         PMD_INIT_FUNC_TRACE();
3355
3356         if (dev == NULL)
3357                 return -EINVAL;
3358
3359         return 0;
3360 }
3361
3362 static void
3363 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3364                        struct rte_cryptodev_info *info)
3365 {
3366         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3367
3368         PMD_INIT_FUNC_TRACE();
3369         if (info != NULL) {
3370                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3371                 info->feature_flags = dev->feature_flags;
3372                 info->capabilities = dpaa_sec_capabilities;
3373                 info->sym.max_nb_sessions = internals->max_nb_sessions;
3374                 info->driver_id = dpaa_cryptodev_driver_id;
3375         }
3376 }
3377
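     /*
      * DQRR callback used when a SEC response queue is attached to an
      * event device with parallel scheduling: convert the completed frame
      * back into a crypto op and fill the rte_event from the queue's
      * stored event template.
      */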
3378 static enum qman_cb_dqrr_result
3379 dpaa_sec_process_parallel_event(void *event,
3380                         struct qman_portal *qm __always_unused,
3381                         struct qman_fq *outq,
3382                         const struct qm_dqrr_entry *dqrr,
3383                         void **bufs)
3384 {
3385         const struct qm_fd *fd;
3386         struct dpaa_sec_job *job;
3387         struct dpaa_sec_op_ctx *ctx;
3388         struct rte_event *ev = (struct rte_event *)event;
3389
3390         fd = &dqrr->fd;
3391
3392         /* The SG table is embedded in an op ctx:
3393          * sg[0] is the output entry,
3394          * sg[1] the input one.
3395          */
3396         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3397
3398         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3399         ctx->fd_status = fd->status;
3400         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3401                 struct qm_sg_entry *sg_out;
3402                 uint32_t len;
3403
3404                 sg_out = &job->sg[0];
3405                 hw_sg_to_cpu(sg_out);
3406                 len = sg_out->length;
3407                 ctx->op->sym->m_src->pkt_len = len;
3408                 ctx->op->sym->m_src->data_len = len;
3409         }
3410         if (!ctx->fd_status) {
3411                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3412         } else {
3413                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3414                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3415         }
3416         ev->event_ptr = (void *)ctx->op;
3417
3418         ev->flow_id = outq->ev.flow_id;
3419         ev->sub_event_type = outq->ev.sub_event_type;
3420         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3421         ev->op = RTE_EVENT_OP_NEW;
3422         ev->sched_type = outq->ev.sched_type;
3423         ev->queue_id = outq->ev.queue_id;
3424         ev->priority = outq->ev.priority;
3425         *bufs = (void *)ctx->op;
3426
3427         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3428
3429         return qman_cb_dqrr_consume;
3430 }
3431
3432 static enum qman_cb_dqrr_result
3433 dpaa_sec_process_atomic_event(void *event,
3434                         struct qman_portal *qm __rte_unused,
3435                         struct qman_fq *outq,
3436                         const struct qm_dqrr_entry *dqrr,
3437                         void **bufs)
3438 {
3439         u8 index;
3440         const struct qm_fd *fd;
3441         struct dpaa_sec_job *job;
3442         struct dpaa_sec_op_ctx *ctx;
3443         struct rte_event *ev = (struct rte_event *)event;
3444
3445         fd = &dqrr->fd;
3446
3447         /* The SG table is embedded in an op ctx:
3448          * sg[0] is the output entry,
3449          * sg[1] the input one.
3450          */
3451         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3452
3453         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3454         ctx->fd_status = fd->status;
3455         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3456                 struct qm_sg_entry *sg_out;
3457                 uint32_t len;
3458
3459                 sg_out = &job->sg[0];
3460                 hw_sg_to_cpu(sg_out);
3461                 len = sg_out->length;
3462                 ctx->op->sym->m_src->pkt_len = len;
3463                 ctx->op->sym->m_src->data_len = len;
3464         }
3465         if (!ctx->fd_status) {
3466                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3467         } else {
3468                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3469                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3470         }
3471         ev->event_ptr = (void *)ctx->op;
3472         ev->flow_id = outq->ev.flow_id;
3473         ev->sub_event_type = outq->ev.sub_event_type;
3474         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3475         ev->op = RTE_EVENT_OP_NEW;
3476         ev->sched_type = outq->ev.sched_type;
3477         ev->queue_id = outq->ev.queue_id;
3478         ev->priority = outq->ev.priority;
3479
3480         /* Save active dqrr entries */
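             /*
              * Each DQRR ring entry occupies 64 bytes, so shifting the entry
              * address right by 6 yields its index within the 16-entry ring;
              * the index is parked in the mbuf so the held-active entry can
              * be consumed later.
              */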
3481         index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3482         DPAA_PER_LCORE_DQRR_SIZE++;
3483         DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3484         DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3485         ev->impl_opaque = index + 1;
3486         *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3487         *bufs = (void *)ctx->op;
3488
3489         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3490
3491         return qman_cb_dqrr_defer;
3492 }
3493
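     /*
      * Attach a queue pair's response queue to an eventdev channel.
      * Atomic scheduling uses HOLDACTIVE so that a flow stays on one
      * portal until its DQRR entry is consumed; ordered scheduling is
      * not supported by this PMD.
      */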
3494 int
3495 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3496                 int qp_id,
3497                 uint16_t ch_id,
3498                 const struct rte_event *event)
3499 {
3500         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3501         struct qm_mcc_initfq opts = {0};
3502
3503         int ret;
3504
3505         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3506                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3507         opts.fqd.dest.channel = ch_id;
3508
3509         switch (event->sched_type) {
3510         case RTE_SCHED_TYPE_ATOMIC:
3511                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3512                 /* Clear the FQCTRL_AVOIDBLOCK bit, as it is unnecessary
3513                  * when HOLDACTIVE is set
3514                  */
3515                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3516                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3517                 break;
3518         case RTE_SCHED_TYPE_ORDERED:
3519                 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3520                 return -ENOTSUP;
3521         default:
3522                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3523                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3524                 break;
3525         }
3526
3527         ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3528         if (unlikely(ret)) {
3529                 DPAA_SEC_ERR("unable to init caam source fq!");
3530                 return ret;
3531         }
3532
3533         memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3534
3535         return 0;
3536 }
3537
3538 int
3539 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3540                         int qp_id)
3541 {
3542         struct qm_mcc_initfq opts = {0};
3543         int ret;
3544         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3545
3546         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3547                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3548         qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3549         qp->outq.cb.ern  = ern_sec_fq_handler;
3550         qman_retire_fq(&qp->outq, NULL);
3551         qman_oos_fq(&qp->outq);
3552         ret = qman_init_fq(&qp->outq, 0, &opts);
3553         if (ret)
3554                 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3555         qp->outq.cb.dqrr = NULL;
3556
3557         return ret;
3558 }
3559
3560 static struct rte_cryptodev_ops crypto_ops = {
3561         .dev_configure        = dpaa_sec_dev_configure,
3562         .dev_start            = dpaa_sec_dev_start,
3563         .dev_stop             = dpaa_sec_dev_stop,
3564         .dev_close            = dpaa_sec_dev_close,
3565         .dev_infos_get        = dpaa_sec_dev_infos_get,
3566         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
3567         .queue_pair_release   = dpaa_sec_queue_pair_release,
3568         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
3569         .sym_session_configure    = dpaa_sec_sym_session_configure,
3570         .sym_session_clear        = dpaa_sec_sym_session_clear,
3571         /* Raw data-path API related operations */
3572         .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3573         .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3574 };
3575
3576 #ifdef RTE_LIB_SECURITY
3577 static const struct rte_security_capability *
3578 dpaa_sec_capabilities_get(void *device __rte_unused)
3579 {
3580         return dpaa_sec_security_cap;
3581 }
3582
3583 static const struct rte_security_ops dpaa_sec_security_ops = {
3584         .session_create = dpaa_sec_security_session_create,
3585         .session_update = NULL,
3586         .session_stats_get = NULL,
3587         .session_destroy = dpaa_sec_security_session_destroy,
3588         .set_pkt_metadata = NULL,
3589         .capabilities_get = dpaa_sec_capabilities_get
3590 };
3591 #endif
3592 static int
3593 dpaa_sec_uninit(struct rte_cryptodev *dev)
3594 {
3595         struct dpaa_sec_dev_private *internals;
3596
3597         if (dev == NULL)
3598                 return -ENODEV;
3599
3600         internals = dev->data->dev_private;
3601         rte_free(dev->security_ctx);
3602
3603         rte_free(internals);
3604
3605         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3606                       dev->data->name, rte_socket_id());
3607
3608         return 0;
3609 }
3610
3611 static int
3612 check_devargs_handler(__rte_unused const char *key, const char *value,
3613                       __rte_unused void *opaque)
3614 {
3615         dpaa_sec_dp_dump = atoi(value);
3616         if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
3617                 DPAA_SEC_WARN("Unsupported DPAA_SEC_DP_DUMP level, "
3618                               "defaulting to full dump\n");
3619                 dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
3620         }
3621
3622         return 0;
3623 }
3624
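     /*
      * Parse the "drv_dump_mode" devarg; valid values follow enum
      * dpaa_sec_dump_levels (0 = no dump, 1 = dump on error, 2 = full
      * dump). Example with an assumed allow-list syntax:
      *   -a dpaa:dpaa_sec-1,drv_dump_mode=1
      */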
3625 static void
3626 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
3627 {
3628         struct rte_kvargs *kvlist;
3629
3630         if (!devargs)
3631                 return;
3632
3633         kvlist = rte_kvargs_parse(devargs->args, NULL);
3634         if (!kvlist)
3635                 return;
3636
3637         if (!rte_kvargs_count(kvlist, key)) {
3638                 rte_kvargs_free(kvlist);
3639                 return;
3640         }
3641
3642         rte_kvargs_process(kvlist, key,
3643                                 check_devargs_handler, NULL);
3644         rte_kvargs_free(kvlist);
3645 }
3646
3647 static int
3648 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3649 {
3650         struct dpaa_sec_dev_private *internals;
3651 #ifdef RTE_LIB_SECURITY
3652         struct rte_security_ctx *security_instance;
3653 #endif
3654         struct dpaa_sec_qp *qp;
3655         uint32_t i, flags;
3656         int ret;
3657
3658         PMD_INIT_FUNC_TRACE();
3659
3660         cryptodev->driver_id = dpaa_cryptodev_driver_id;
3661         cryptodev->dev_ops = &crypto_ops;
3662
3663         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3664         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3665         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3666                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3667                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3668                         RTE_CRYPTODEV_FF_SECURITY |
3669                         RTE_CRYPTODEV_FF_SYM_RAW_DP |
3670                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3671                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3672                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3673                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3674                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3675
3676         internals = cryptodev->data->dev_private;
3677         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3678         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3679
3680         /*
3681          * For secondary processes, we don't initialise any further as the
3682          * primary has already done this work.
3683          */
3685         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3686                 DPAA_SEC_WARN("Device already initialized by primary process");
3687                 return 0;
3688         }
3689 #ifdef RTE_LIB_SECURITY
3690         /* Initialize security_ctx only for the primary process */
3691         security_instance = rte_malloc("rte_security_instances_ops",
3692                                 sizeof(struct rte_security_ctx), 0);
3693         if (security_instance == NULL)
3694                 return -ENOMEM;
3695         security_instance->device = (void *)cryptodev;
3696         security_instance->ops = &dpaa_sec_security_ops;
3697         security_instance->sess_cnt = 0;
3698         cryptodev->security_ctx = security_instance;
3699 #endif
3700         rte_spinlock_init(&internals->lock);
3701         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3702                 /* init qman fq for queue pair */
3703                 qp = &internals->qps[i];
3704                 ret = dpaa_sec_init_tx(&qp->outq);
3705                 if (ret) {
3706                         DPAA_SEC_ERR("failed to config tx of queue pair %d", i);
3707                         goto init_error;
3708                 }
3709         }
3710
3711         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3712                 QMAN_FQ_FLAG_TO_DCPORTAL;
3713         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3714                 /* create rx qman fq for sessions */
3715                 ret = qman_create_fq(0, flags, &internals->inq[i]);
3716                 if (unlikely(ret != 0)) {
3717                         DPAA_SEC_ERR("sec qman_create_fq failed");
3718                         goto init_error;
3719                 }
3720         }
3721
3722         dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
3723
3724         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3725         return 0;
3726
3727 init_error:
3728         DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3729
3730         rte_free(cryptodev->security_ctx);
3731         return -EFAULT;
3732 }
3733
3734 static int
3735 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3736                                 struct rte_dpaa_device *dpaa_dev)
3737 {
3738         struct rte_cryptodev *cryptodev;
3739         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3740
3741         int retval;
3742
3743         snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3744
3745         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3746         if (cryptodev == NULL)
3747                 return -ENOMEM;
3748
3749         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3750                 cryptodev->data->dev_private = rte_zmalloc_socket(
3751                                         "cryptodev private structure",
3752                                         sizeof(struct dpaa_sec_dev_private),
3753                                         RTE_CACHE_LINE_SIZE,
3754                                         rte_socket_id());
3755
3756                 if (cryptodev->data->dev_private == NULL)
3757                         rte_panic("Cannot allocate memzone for private "
3758                                         "device data");
3759         }
3760
3761         dpaa_dev->crypto_dev = cryptodev;
3762         cryptodev->device = &dpaa_dev->device;
3763
3764         /* init user callbacks */
3765         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3766
3767         /* If the SEC era is not yet configured, read it from the device tree */
3768         if (!rta_get_sec_era()) {
3769                 const struct device_node *caam_node;
3770
3771                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3772                         const uint32_t *prop = of_get_property(caam_node,
3773                                         "fsl,sec-era",
3774                                         NULL);
3775                         if (prop) {
3776                                 rta_set_sec_era(
3777                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3778                                 break;
3779                         }
3780                 }
3781         }
3782
3783         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3784                 retval = rte_dpaa_portal_init((void *)1);
3785                 if (retval) {
3786                         DPAA_SEC_ERR("Unable to initialize portal");
3787                         goto out;
3788                 }
3789         }
3790
3791         /* Invoke PMD device initialization function */
3792         retval = dpaa_sec_dev_init(cryptodev);
3793         if (retval == 0) {
3794                 rte_cryptodev_pmd_probing_finish(cryptodev);
3795                 return 0;
3796         }
3797
3798         retval = -ENXIO;
3799 out:
3800         /* In case of error, cleanup is done */
3801         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3802                 rte_free(cryptodev->data->dev_private);
3803
3804         rte_cryptodev_pmd_release_device(cryptodev);
3805
3806         return retval;
3807 }
3808
3809 static int
3810 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3811 {
3812         struct rte_cryptodev *cryptodev;
3813         int ret;
3814
3815         cryptodev = dpaa_dev->crypto_dev;
3816         if (cryptodev == NULL)
3817                 return -ENODEV;
3818
3819         ret = dpaa_sec_uninit(cryptodev);
3820         if (ret)
3821                 return ret;
3822
3823         return rte_cryptodev_pmd_destroy(cryptodev);
3824 }
3825
3826 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3827         .drv_type = FSL_DPAA_CRYPTO,
3828         .driver = {
3829                 .name = "DPAA SEC PMD"
3830         },
3831         .probe = cryptodev_dpaa_sec_probe,
3832         .remove = cryptodev_dpaa_sec_remove,
3833 };
3834
3835 static struct cryptodev_driver dpaa_sec_crypto_drv;
3836
3837 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3838 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3839                 dpaa_cryptodev_driver_id);
3840 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
3841                 DRIVER_DUMP_MODE "=<int>");
3842 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);