2e77a8af80101f0730d62d7907ab5c05d5ea6876
[dpdk.git] / drivers / crypto / dpaa_sec / dpaa_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2018 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
27
28 #include <fsl_usd.h>
29 #include <fsl_qman.h>
30 #include <of.h>
31
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
36 #include <hw/desc/pdcp.h>
37
38 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec.h>
40 #include <dpaa_sec_log.h>
41
/* SEC hardware era used by the RTA descriptor library (set at probe time). */
enum rta_sec_era rta_sec_era;

/* Log type ID registered for this driver. */
int dpaa_logtype_sec;

/* Driver ID assigned by the cryptodev layer at registration. */
static uint8_t cryptodev_driver_id;

/* Per-thread scratch used by the DQRR callback to collect completed
 * crypto ops during a dequeue burst (see dqrr_out_fq_cb_rx()).
 */
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
53
54 static inline void
55 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 {
57         if (!ctx->fd_status) {
58                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59         } else {
60                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
61                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
62         }
63
64         /* report op status to sym->op and then free the ctx memeory  */
65         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
66 }
67
68 static inline struct dpaa_sec_op_ctx *
69 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
70 {
71         struct dpaa_sec_op_ctx *ctx;
72         int retval;
73
74         retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
75         if (!ctx || retval) {
76                 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
77                 return NULL;
78         }
79         /*
80          * Clear SG memory. There are 16 SG entries of 16 Bytes each.
81          * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
82          * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
83          * each packet, memset is costlier than dcbz_64().
84          */
85         dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
86         dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
87         dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
88         dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
89
90         ctx->ctx_pool = ses->ctx_pool;
91         ctx->vtop_offset = (size_t) ctx
92                                 - rte_mempool_virt2iova(ctx);
93
94         return ctx;
95 }
96
97 static inline rte_iova_t
98 dpaa_mem_vtop(void *vaddr)
99 {
100         const struct rte_memseg *ms;
101
102         ms = rte_mem_virt2memseg(vaddr, NULL);
103         if (ms)
104                 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
105         return (size_t)NULL;
106 }
107
108 static inline void *
109 dpaa_mem_ptov(rte_iova_t paddr)
110 {
111         void *va;
112
113         va = (void *)dpaax_iova_table_get_va(paddr);
114         if (likely(va))
115                 return va;
116
117         return rte_mem_iova2virt(paddr);
118 }
119
/* ERN (enqueue rejection) callback: frames rejected by QMan on a SEC
 * frame queue are only logged; there is no recovery path here.
 */
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
128
129 /* initialize the queue with dest chan as caam chan so that
130  * all the packets in this queue could be dispatched into caam
131  */
132 static int
133 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
134                  uint32_t fqid_out)
135 {
136         struct qm_mcc_initfq fq_opts;
137         uint32_t flags;
138         int ret = -1;
139
140         /* Clear FQ options */
141         memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
142
143         flags = QMAN_INITFQ_FLAG_SCHED;
144         fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
145                           QM_INITFQ_WE_CONTEXTB;
146
147         qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
148         fq_opts.fqd.context_b = fqid_out;
149         fq_opts.fqd.dest.channel = qm_channel_caam;
150         fq_opts.fqd.dest.wq = 0;
151
152         fq_in->cb.ern  = ern_sec_fq_handler;
153
154         DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
155
156         ret = qman_init_fq(fq_in, flags, &fq_opts);
157         if (unlikely(ret != 0))
158                 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
159
160         return ret;
161 }
162
163 /* something is put into in_fq and caam put the crypto result into out_fq */
/* DQRR callback on the SEC output queue: for each completed frame,
 * recover the op context embedded around the SG table, record the FD
 * status, stash the op in the per-thread burst array and recycle the
 * context. Defers entries once the burst array is full.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	/* Burst buffer full: ask QMan to hold this entry until the
	 * application drains dpaa_sec_ops.
	 */
	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	/* Entries without a valid frame descriptor carry no work. */
	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		/* Protocol offload may change the frame size; propagate
		 * the SEC-produced output length to the mbuf.
		 */
		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
203
204 /* caam result is put into this queue */
205 static int
206 dpaa_sec_init_tx(struct qman_fq *fq)
207 {
208         int ret;
209         struct qm_mcc_initfq opts;
210         uint32_t flags;
211
212         flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
213                 QMAN_FQ_FLAG_DYNAMIC_FQID;
214
215         ret = qman_create_fq(0, flags, fq);
216         if (unlikely(ret)) {
217                 DPAA_SEC_ERR("qman_create_fq failed");
218                 return ret;
219         }
220
221         memset(&opts, 0, sizeof(opts));
222         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
223                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
224
225         /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
226
227         fq->cb.dqrr = dqrr_out_fq_cb_rx;
228         fq->cb.ern  = ern_sec_fq_handler;
229
230         ret = qman_init_fq(fq, 0, &opts);
231         if (unlikely(ret)) {
232                 DPAA_SEC_ERR("unable to init caam source fq!");
233                 return ret;
234         }
235
236         return ret;
237 }
238
239 static inline int is_cipher_only(dpaa_sec_session *ses)
240 {
241         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
242                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
243 }
244
245 static inline int is_auth_only(dpaa_sec_session *ses)
246 {
247         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
248                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
249 }
250
251 static inline int is_aead(dpaa_sec_session *ses)
252 {
253         return ((ses->cipher_alg == 0) &&
254                 (ses->auth_alg == 0) &&
255                 (ses->aead_alg != 0));
256 }
257
258 static inline int is_auth_cipher(dpaa_sec_session *ses)
259 {
260         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
261                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
262                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
263 }
264
265 static inline int is_proto_ipsec(dpaa_sec_session *ses)
266 {
267         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
268 }
269
270 static inline int is_proto_pdcp(dpaa_sec_session *ses)
271 {
272         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
273 }
274
275 static inline int is_encode(dpaa_sec_session *ses)
276 {
277         return ses->dir == DIR_ENC;
278 }
279
280 static inline int is_decode(dpaa_sec_session *ses)
281 {
282         return ses->dir == DIR_DEC;
283 }
284
285 static inline void
286 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
287 {
288         switch (ses->auth_alg) {
289         case RTE_CRYPTO_AUTH_NULL:
290                 alginfo_a->algtype =
291                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
292                         OP_PCL_IPSEC_HMAC_NULL : 0;
293                 ses->digest_length = 0;
294                 break;
295         case RTE_CRYPTO_AUTH_MD5_HMAC:
296                 alginfo_a->algtype =
297                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
298                         OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
299                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
300                 break;
301         case RTE_CRYPTO_AUTH_SHA1_HMAC:
302                 alginfo_a->algtype =
303                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
304                         OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
305                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
306                 break;
307         case RTE_CRYPTO_AUTH_SHA224_HMAC:
308                 alginfo_a->algtype =
309                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
310                         OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
311                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
312                 break;
313         case RTE_CRYPTO_AUTH_SHA256_HMAC:
314                 alginfo_a->algtype =
315                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
316                         OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
317                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
318                 break;
319         case RTE_CRYPTO_AUTH_SHA384_HMAC:
320                 alginfo_a->algtype =
321                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
322                         OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
323                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
324                 break;
325         case RTE_CRYPTO_AUTH_SHA512_HMAC:
326                 alginfo_a->algtype =
327                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
328                         OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
329                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
330                 break;
331         default:
332                 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
333         }
334 }
335
336 static inline void
337 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
338 {
339         switch (ses->cipher_alg) {
340         case RTE_CRYPTO_CIPHER_NULL:
341                 alginfo_c->algtype =
342                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
343                         OP_PCL_IPSEC_NULL : 0;
344                 break;
345         case RTE_CRYPTO_CIPHER_AES_CBC:
346                 alginfo_c->algtype =
347                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
348                         OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
349                 alginfo_c->algmode = OP_ALG_AAI_CBC;
350                 break;
351         case RTE_CRYPTO_CIPHER_3DES_CBC:
352                 alginfo_c->algtype =
353                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
354                         OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
355                 alginfo_c->algmode = OP_ALG_AAI_CBC;
356                 break;
357         case RTE_CRYPTO_CIPHER_AES_CTR:
358                 alginfo_c->algtype =
359                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
360                         OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
361                 alginfo_c->algmode = OP_ALG_AAI_CTR;
362                 break;
363         default:
364                 DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
365         }
366 }
367
368 static inline void
369 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
370 {
371         switch (ses->aead_alg) {
372         case RTE_CRYPTO_AEAD_AES_GCM:
373                 alginfo->algtype = OP_ALG_ALGSEL_AES;
374                 alginfo->algmode = OP_ALG_AAI_GCM;
375                 break;
376         default:
377                 DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
378         }
379 }
380
/* Build the PDCP shared descriptor (control- or user-plane) for the
 * session. Returns the descriptor length in words on success, a
 * negative value on error.
 */
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	/* Translate the cryptodev cipher algorithm into the PDCP
	 * descriptor-library cipher type.
	 */
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      ses->cipher_alg);
		return -1;
	}

	/* Start with the key inlined in the descriptor; may be demoted
	 * to a pointer reference below if it does not fit.
	 */
	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		/* Control plane: integrity protection is also required. */
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				      ses->auth_alg);
			return -1;
		}

		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;

		/* Ask RTA whether both keys fit inline in the descriptor;
		 * sh_desc[0..1] carry the key lengths in, sh_desc[2] comes
		 * back as a bitmask (bit n set => key n can stay inline).
		 */
		cdb->sh_desc[0] = cipherdata.keylen;
		cdb->sh_desc[1] = authdata.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		/* Demote keys that do not fit inline to pointer references
		 * (IOVA of the key material).
		 */
		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
			cipherdata.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		if (!(cdb->sh_desc[2] & (1<<1)) &&  authdata.keylen) {
			authdata.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;
		}

		/* Clear the scratch words before the real descriptor is
		 * constructed in-place.
		 */
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		/* User plane: cipher only; same inline-query dance with a
		 * single key.
		 */
		cdb->sh_desc[0] = cipherdata.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 1);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
			cipherdata.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, 0);
	}

	return shared_desc_len;
}
529
530 /* prepare ipsec proto command block of the session */
/* Build the IPsec protocol-offload shared descriptor for the session.
 * Returns the descriptor length in words, -ENOTSUP for unsupported
 * algorithms, or a negative RTA error.
 */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	caam_cipher_alg(ses, &cipherdata);
	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported cipher alg");
		return -ENOTSUP;
	}

	/* Default to inlining the keys; may be demoted below. */
	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	caam_auth_alg(ses, &authdata);
	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported auth alg");
		return -ENOTSUP;
	}

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	/* Ask RTA whether both keys fit inline; sh_desc[0..1] carry the
	 * key lengths in, sh_desc[2] returns a bitmask (bit n set => key
	 * n can stay immediate).
	 */
	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		/* Key too large to inline: reference it by IOVA instead. */
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	/* Clear the scratch words before building the real descriptor. */
	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
611
612 /* prepare command block of the session */
/* Build the session's command block (shared descriptor + header),
 * dispatching on the session kind: IPsec offload, PDCP offload, cipher
 * only, auth only, AEAD, or combined cipher+auth. Returns 0 on
 * success, negative errno/RTA error otherwise.
 */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_proto_ipsec(ses)) {
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
	} else if (is_proto_pdcp(ses)) {
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
	} else if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		/* Combined cipher + auth session. */
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		/* Ask RTA whether both keys fit inline; sh_desc[2] returns
		 * a bitmask (bit n set => key n can stay immediate).
		 */
		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			/* Too large to inline: reference by IOVA. */
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, &alginfo_c, &alginfo_a,
				ses->iv.length, 0,
				ses->digest_length, ses->dir);
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	/* Record the descriptor length and convert the header words to
	 * the big-endian layout the SEC hardware expects.
	 */
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
760
761 /* qp is lockless, should be accessed by only one thread */
/* Dequeue up to nb_ops completed crypto ops from the queue pair's
 * output FQ via a volatile dequeue. Returns the number of ops placed
 * into 'ops'. The qp is lockless: only one thread may call this.
 */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * Until request for four buffers, we provide exact number of buffers.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
	 * requested, so we request two less in this case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	/* Drain the FQ until the volatile dequeue command completes. */
	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;

			/* Protocol offload may change the frame size;
			 * propagate the output length to the mbuf.
			 */
			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
835
/* Build a SEC job for auth-only (hash) over a scattered (multi-seg) mbuf.
 * cf->sg[0] is the output (digest destination), cf->sg[1] the input
 * compound frame pointing at cf->sg[2..], which list the payload segments
 * plus, on verification, a copy of the expected digest for in-HW compare.
 * Returns NULL if the segment count exceeds MAX_SG_ENTRIES or no ctx is
 * available.
 */
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	/* Verification needs one extra SG entry for the reference digest */
	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		/* Stash the expected digest in ctx so HW can compare it */
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
915
916 /**
917  * packet looks like:
918  *              |<----data_len------->|
919  *    |ip_header|ah_header|icv|payload|
920  *              ^
921  *              |
922  *         mbuf->pkt.data
923  */
924 static inline struct dpaa_sec_job *
925 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
926 {
927         struct rte_crypto_sym_op *sym = op->sym;
928         struct rte_mbuf *mbuf = sym->m_src;
929         struct dpaa_sec_job *cf;
930         struct dpaa_sec_op_ctx *ctx;
931         struct qm_sg_entry *sg;
932         rte_iova_t start_addr;
933         uint8_t *old_digest;
934
935         ctx = dpaa_sec_alloc_ctx(ses);
936         if (!ctx)
937                 return NULL;
938
939         cf = &ctx->job;
940         ctx->op = op;
941         old_digest = ctx->digest;
942
943         start_addr = rte_pktmbuf_iova(mbuf);
944         /* output */
945         sg = &cf->sg[0];
946         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
947         sg->length = ses->digest_length;
948         cpu_to_hw_sg(sg);
949
950         /* input */
951         sg = &cf->sg[1];
952         if (is_decode(ses)) {
953                 /* need to extend the input to a compound frame */
954                 sg->extension = 1;
955                 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
956                 sg->length = sym->auth.data.length + ses->digest_length;
957                 sg->final = 1;
958                 cpu_to_hw_sg(sg);
959
960                 sg = &cf->sg[2];
961                 /* hash result or digest, save digest first */
962                 rte_memcpy(old_digest, sym->auth.digest.data,
963                            ses->digest_length);
964                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
965                 sg->length = sym->auth.data.length;
966                 cpu_to_hw_sg(sg);
967
968                 /* let's check digest by hw */
969                 start_addr = dpaa_mem_vtop(old_digest);
970                 sg++;
971                 qm_sg_entry_set64(sg, start_addr);
972                 sg->length = ses->digest_length;
973                 sg->final = 1;
974                 cpu_to_hw_sg(sg);
975         } else {
976                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
977                 sg->length = sym->auth.data.length;
978                 sg->final = 1;
979                 cpu_to_hw_sg(sg);
980         }
981
982         return cf;
983 }
984
/* Build a SEC job for cipher-only on scatter-gather (multi-seg) mbufs.
 * cf->sg[0] is the output compound frame over the destination segments;
 * cf->sg[1] the input compound frame (IV entry followed by the source
 * payload segments). Returns NULL if too many segments or no ctx.
 */
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	/* Out-of-place: dst segs + src segs; in-place: source walked twice.
	 * +3 covers the two compound-frame headers and the IV entry.
	 */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	/* input SG list starts right after the output SG list */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
1079
1080 static inline struct dpaa_sec_job *
1081 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1082 {
1083         struct rte_crypto_sym_op *sym = op->sym;
1084         struct dpaa_sec_job *cf;
1085         struct dpaa_sec_op_ctx *ctx;
1086         struct qm_sg_entry *sg;
1087         rte_iova_t src_start_addr, dst_start_addr;
1088         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1089                         ses->iv.offset);
1090
1091         ctx = dpaa_sec_alloc_ctx(ses);
1092         if (!ctx)
1093                 return NULL;
1094
1095         cf = &ctx->job;
1096         ctx->op = op;
1097
1098         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1099
1100         if (sym->m_dst)
1101                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1102         else
1103                 dst_start_addr = src_start_addr;
1104
1105         /* output */
1106         sg = &cf->sg[0];
1107         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1108         sg->length = sym->cipher.data.length + ses->iv.length;
1109         cpu_to_hw_sg(sg);
1110
1111         /* input */
1112         sg = &cf->sg[1];
1113
1114         /* need to extend the input to a compound frame */
1115         sg->extension = 1;
1116         sg->final = 1;
1117         sg->length = sym->cipher.data.length + ses->iv.length;
1118         qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1119         cpu_to_hw_sg(sg);
1120
1121         sg = &cf->sg[2];
1122         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1123         sg->length = ses->iv.length;
1124         cpu_to_hw_sg(sg);
1125
1126         sg++;
1127         qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
1128         sg->length = sym->cipher.data.length;
1129         sg->final = 1;
1130         cpu_to_hw_sg(sg);
1131
1132         return cf;
1133 }
1134
/* Build a SEC job for AEAD (GCM) on scatter-gather (multi-seg) mbufs.
 * cf->sg[0]/cf->sg[1] are the output/input compound frames; the input
 * lists IV, optional AAD, then payload segments (plus the expected digest
 * on decrypt); the output lists the destination segments (plus the digest
 * destination on encrypt). Returns NULL if too many segments or no ctx.
 */
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	/* +4 covers the two compound headers, IV and digest entries */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	/* AAD, when present, takes one more SG entry */
	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	/* on encrypt the generated tag is appended to the output */
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
					ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	/* on decrypt the expected tag is part of the input */
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		/* stash the expected digest in ctx for in-HW verification */
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
1273
/* Build a SEC job for AEAD (GCM) on contiguous mbufs.
 * The input compound frame (cf->sg[1]) chains IV, optional AAD, payload
 * and, on decrypt, the expected digest; the output compound frame
 * (cf->sg[0]) covers the destination data and, on encrypt, the digest.
 */
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	/* in-place operation when no distinct destination mbuf is given */
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		/* encrypt input: IV, optional AAD, then plaintext */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		/* decrypt input: IV, optional AAD, ciphertext, digest */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		/* stash the expected digest in ctx for in-HW verification */
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
1385
/* Build a SEC job for chained cipher+auth on scatter-gather (multi-seg)
 * mbufs. The input compound frame chains the IV and the source segments
 * (plus the expected digest on decrypt); the output compound frame chains
 * the destination segments (plus the digest destination on encrypt).
 * Returns NULL if too many segments or no ctx.
 */
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	/* +4 covers the two compound headers, IV and digest entries */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	/* on encrypt the generated digest is appended to the output */
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	/* on decrypt the expected digest is part of the input */
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		/* stash the expected digest in ctx for in-HW verification */
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
1511
/* Build a SEC job for chained cipher+auth on contiguous mbufs.
 * The input compound frame (cf->sg[1]) chains IV and source data (plus
 * the expected digest on decrypt); the output compound frame (cf->sg[0])
 * covers the cipher output and, on encrypt, the digest destination.
 */
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	/* in-place operation when no distinct destination mbuf is given */
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		/* encrypt input: IV then the data to cipher/authenticate */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		/* decrypt input: IV, data, then the expected digest */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		/* stash the expected digest in ctx for in-HW verification */
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
1606
1607 static inline struct dpaa_sec_job *
1608 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1609 {
1610         struct rte_crypto_sym_op *sym = op->sym;
1611         struct dpaa_sec_job *cf;
1612         struct dpaa_sec_op_ctx *ctx;
1613         struct qm_sg_entry *sg;
1614         phys_addr_t src_start_addr, dst_start_addr;
1615
1616         ctx = dpaa_sec_alloc_ctx(ses);
1617         if (!ctx)
1618                 return NULL;
1619         cf = &ctx->job;
1620         ctx->op = op;
1621
1622         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1623
1624         if (sym->m_dst)
1625                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1626         else
1627                 dst_start_addr = src_start_addr;
1628
1629         /* input */
1630         sg = &cf->sg[1];
1631         qm_sg_entry_set64(sg, src_start_addr);
1632         sg->length = sym->m_src->pkt_len;
1633         sg->final = 1;
1634         cpu_to_hw_sg(sg);
1635
1636         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1637         /* output */
1638         sg = &cf->sg[0];
1639         qm_sg_entry_set64(sg, dst_start_addr);
1640         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1641         cpu_to_hw_sg(sg);
1642
1643         return cf;
1644 }
1645
1646 static uint16_t
1647 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1648                        uint16_t nb_ops)
1649 {
1650         /* Function to transmit the frames to given device and queuepair */
1651         uint32_t loop;
1652         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1653         uint16_t num_tx = 0;
1654         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1655         uint32_t frames_to_send;
1656         struct rte_crypto_op *op;
1657         struct dpaa_sec_job *cf;
1658         dpaa_sec_session *ses;
1659         uint32_t auth_only_len;
1660         struct qman_fq *inq[DPAA_SEC_BURST];
1661
1662         while (nb_ops) {
1663                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1664                                 DPAA_SEC_BURST : nb_ops;
1665                 for (loop = 0; loop < frames_to_send; loop++) {
1666                         op = *(ops++);
1667                         switch (op->sess_type) {
1668                         case RTE_CRYPTO_OP_WITH_SESSION:
1669                                 ses = (dpaa_sec_session *)
1670                                         get_sym_session_private_data(
1671                                                         op->sym->session,
1672                                                         cryptodev_driver_id);
1673                                 break;
1674                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1675                                 ses = (dpaa_sec_session *)
1676                                         get_sec_session_private_data(
1677                                                         op->sym->sec_session);
1678                                 break;
1679                         default:
1680                                 DPAA_SEC_DP_ERR(
1681                                         "sessionless crypto op not supported");
1682                                 frames_to_send = loop;
1683                                 nb_ops = loop;
1684                                 goto send_pkts;
1685                         }
1686                         if (unlikely(!ses->qp)) {
1687                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1688                                         frames_to_send = loop;
1689                                         nb_ops = loop;
1690                                         goto send_pkts;
1691                                 }
1692                         } else if (unlikely(ses->qp != qp)) {
1693                                 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1694                                         " New qp = %p\n", ses->qp, qp);
1695                                 frames_to_send = loop;
1696                                 nb_ops = loop;
1697                                 goto send_pkts;
1698                         }
1699
1700                         auth_only_len = op->sym->auth.data.length -
1701                                                 op->sym->cipher.data.length;
1702                         if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1703                                 if (is_proto_ipsec(ses)) {
1704                                         cf = build_proto(op, ses);
1705                                 } else if (is_proto_pdcp(ses)) {
1706                                         cf = build_proto(op, ses);
1707                                 } else if (is_auth_only(ses)) {
1708                                         cf = build_auth_only(op, ses);
1709                                 } else if (is_cipher_only(ses)) {
1710                                         cf = build_cipher_only(op, ses);
1711                                 } else if (is_aead(ses)) {
1712                                         cf = build_cipher_auth_gcm(op, ses);
1713                                         auth_only_len = ses->auth_only_len;
1714                                 } else if (is_auth_cipher(ses)) {
1715                                         cf = build_cipher_auth(op, ses);
1716                                 } else {
1717                                         DPAA_SEC_DP_ERR("not supported ops");
1718                                         frames_to_send = loop;
1719                                         nb_ops = loop;
1720                                         goto send_pkts;
1721                                 }
1722                         } else {
1723                                 if (is_auth_only(ses)) {
1724                                         cf = build_auth_only_sg(op, ses);
1725                                 } else if (is_cipher_only(ses)) {
1726                                         cf = build_cipher_only_sg(op, ses);
1727                                 } else if (is_aead(ses)) {
1728                                         cf = build_cipher_auth_gcm_sg(op, ses);
1729                                         auth_only_len = ses->auth_only_len;
1730                                 } else if (is_auth_cipher(ses)) {
1731                                         cf = build_cipher_auth_sg(op, ses);
1732                                 } else {
1733                                         DPAA_SEC_DP_ERR("not supported ops");
1734                                         frames_to_send = loop;
1735                                         nb_ops = loop;
1736                                         goto send_pkts;
1737                                 }
1738                         }
1739                         if (unlikely(!cf)) {
1740                                 frames_to_send = loop;
1741                                 nb_ops = loop;
1742                                 goto send_pkts;
1743                         }
1744
1745                         fd = &fds[loop];
1746                         inq[loop] = ses->inq;
1747                         fd->opaque_addr = 0;
1748                         fd->cmd = 0;
1749                         qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1750                         fd->_format1 = qm_fd_compound;
1751                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1752                         /* Auth_only_len is set as 0 in descriptor and it is
1753                          * overwritten here in the fd.cmd which will update
1754                          * the DPOVRD reg.
1755                          */
1756                         if (auth_only_len)
1757                                 fd->cmd = 0x80000000 | auth_only_len;
1758
1759                 }
1760 send_pkts:
1761                 loop = 0;
1762                 while (loop < frames_to_send) {
1763                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1764                                         frames_to_send - loop);
1765                 }
1766                 nb_ops -= frames_to_send;
1767                 num_tx += frames_to_send;
1768         }
1769
1770         dpaa_qp->tx_pkts += num_tx;
1771         dpaa_qp->tx_errs += nb_ops - num_tx;
1772
1773         return num_tx;
1774 }
1775
1776 static uint16_t
1777 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1778                        uint16_t nb_ops)
1779 {
1780         uint16_t num_rx;
1781         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1782
1783         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1784
1785         dpaa_qp->rx_pkts += num_rx;
1786         dpaa_qp->rx_errs += nb_ops - num_rx;
1787
1788         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1789
1790         return num_rx;
1791 }
1792
1793 /** Release queue pair */
1794 static int
1795 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1796                             uint16_t qp_id)
1797 {
1798         struct dpaa_sec_dev_private *internals;
1799         struct dpaa_sec_qp *qp = NULL;
1800
1801         PMD_INIT_FUNC_TRACE();
1802
1803         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1804
1805         internals = dev->data->dev_private;
1806         if (qp_id >= internals->max_nb_queue_pairs) {
1807                 DPAA_SEC_ERR("Max supported qpid %d",
1808                              internals->max_nb_queue_pairs);
1809                 return -EINVAL;
1810         }
1811
1812         qp = &internals->qps[qp_id];
1813         qp->internals = NULL;
1814         dev->data->queue_pairs[qp_id] = NULL;
1815
1816         return 0;
1817 }
1818
1819 /** Setup a queue pair */
1820 static int
1821 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1822                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1823                 __rte_unused int socket_id)
1824 {
1825         struct dpaa_sec_dev_private *internals;
1826         struct dpaa_sec_qp *qp = NULL;
1827
1828         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1829
1830         internals = dev->data->dev_private;
1831         if (qp_id >= internals->max_nb_queue_pairs) {
1832                 DPAA_SEC_ERR("Max supported qpid %d",
1833                              internals->max_nb_queue_pairs);
1834                 return -EINVAL;
1835         }
1836
1837         qp = &internals->qps[qp_id];
1838         qp->internals = internals;
1839         dev->data->queue_pairs[qp_id] = qp;
1840
1841         return 0;
1842 }
1843
1844 /** Return the number of allocated queue pairs */
1845 static uint32_t
1846 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1847 {
1848         PMD_INIT_FUNC_TRACE();
1849
1850         return dev->data->nb_queue_pairs;
1851 }
1852
1853 /** Returns the size of session structure */
1854 static unsigned int
1855 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1856 {
1857         PMD_INIT_FUNC_TRACE();
1858
1859         return sizeof(dpaa_sec_session);
1860 }
1861
1862 static int
1863 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1864                      struct rte_crypto_sym_xform *xform,
1865                      dpaa_sec_session *session)
1866 {
1867         session->cipher_alg = xform->cipher.algo;
1868         session->iv.length = xform->cipher.iv.length;
1869         session->iv.offset = xform->cipher.iv.offset;
1870         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1871                                                RTE_CACHE_LINE_SIZE);
1872         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1873                 DPAA_SEC_ERR("No Memory for cipher key");
1874                 return -ENOMEM;
1875         }
1876         session->cipher_key.length = xform->cipher.key.length;
1877
1878         memcpy(session->cipher_key.data, xform->cipher.key.data,
1879                xform->cipher.key.length);
1880         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1881                         DIR_ENC : DIR_DEC;
1882
1883         return 0;
1884 }
1885
1886 static int
1887 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1888                    struct rte_crypto_sym_xform *xform,
1889                    dpaa_sec_session *session)
1890 {
1891         session->auth_alg = xform->auth.algo;
1892         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1893                                              RTE_CACHE_LINE_SIZE);
1894         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1895                 DPAA_SEC_ERR("No Memory for auth key");
1896                 return -ENOMEM;
1897         }
1898         session->auth_key.length = xform->auth.key.length;
1899         session->digest_length = xform->auth.digest_length;
1900
1901         memcpy(session->auth_key.data, xform->auth.key.data,
1902                xform->auth.key.length);
1903         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1904                         DIR_ENC : DIR_DEC;
1905
1906         return 0;
1907 }
1908
1909 static int
1910 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1911                    struct rte_crypto_sym_xform *xform,
1912                    dpaa_sec_session *session)
1913 {
1914         session->aead_alg = xform->aead.algo;
1915         session->iv.length = xform->aead.iv.length;
1916         session->iv.offset = xform->aead.iv.offset;
1917         session->auth_only_len = xform->aead.aad_length;
1918         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1919                                              RTE_CACHE_LINE_SIZE);
1920         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1921                 DPAA_SEC_ERR("No Memory for aead key\n");
1922                 return -ENOMEM;
1923         }
1924         session->aead_key.length = xform->aead.key.length;
1925         session->digest_length = xform->aead.digest_length;
1926
1927         memcpy(session->aead_key.data, xform->aead.key.data,
1928                xform->aead.key.length);
1929         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1930                         DIR_ENC : DIR_DEC;
1931
1932         return 0;
1933 }
1934
1935 static struct qman_fq *
1936 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1937 {
1938         unsigned int i;
1939
1940         for (i = 0; i < qi->max_nb_sessions; i++) {
1941                 if (qi->inq_attach[i] == 0) {
1942                         qi->inq_attach[i] = 1;
1943                         return &qi->inq[i];
1944                 }
1945         }
1946         DPAA_SEC_WARN("All ses session in use %x", qi->max_nb_sessions);
1947
1948         return NULL;
1949 }
1950
1951 static int
1952 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1953 {
1954         unsigned int i;
1955
1956         for (i = 0; i < qi->max_nb_sessions; i++) {
1957                 if (&qi->inq[i] == fq) {
1958                         qman_retire_fq(fq, NULL);
1959                         qman_oos_fq(fq);
1960                         qi->inq_attach[i] = 0;
1961                         return 0;
1962                 }
1963         }
1964         return -1;
1965 }
1966
/* Bind a session to a queue pair: build its shared descriptor and
 * initialise its SEC input queue to feed the queue pair's output queue.
 * Returns 0 on success, negative on failure.
 */
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	/* qp must be recorded before building the CDB, which is derived
	 * from the session's transforms.
	 */
	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return -1;
	}
	/* Lazily affine a QMan portal to this lcore if not already done;
	 * required before any frame queue operations from this thread.
	 */
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	/* Point the session's input queue at the CDB and route SEC output
	 * to this queue pair's outq.
	 */
	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
1992
1993 static int
1994 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1995                             struct rte_crypto_sym_xform *xform, void *sess)
1996 {
1997         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1998         dpaa_sec_session *session = sess;
1999
2000         PMD_INIT_FUNC_TRACE();
2001
2002         if (unlikely(sess == NULL)) {
2003                 DPAA_SEC_ERR("invalid session struct");
2004                 return -EINVAL;
2005         }
2006         memset(session, 0, sizeof(dpaa_sec_session));
2007
2008         /* Default IV length = 0 */
2009         session->iv.length = 0;
2010
2011         /* Cipher Only */
2012         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2013                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2014                 dpaa_sec_cipher_init(dev, xform, session);
2015
2016         /* Authentication Only */
2017         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2018                    xform->next == NULL) {
2019                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2020                 dpaa_sec_auth_init(dev, xform, session);
2021
2022         /* Cipher then Authenticate */
2023         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2024                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2025                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2026                         dpaa_sec_cipher_init(dev, xform, session);
2027                         dpaa_sec_auth_init(dev, xform->next, session);
2028                 } else {
2029                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2030                         return -EINVAL;
2031                 }
2032
2033         /* Authenticate then Cipher */
2034         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2035                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2036                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2037                         dpaa_sec_auth_init(dev, xform, session);
2038                         dpaa_sec_cipher_init(dev, xform->next, session);
2039                 } else {
2040                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2041                         return -EINVAL;
2042                 }
2043
2044         /* AEAD operation for AES-GCM kind of Algorithms */
2045         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2046                    xform->next == NULL) {
2047                 dpaa_sec_aead_init(dev, xform, session);
2048
2049         } else {
2050                 DPAA_SEC_ERR("Invalid crypto type");
2051                 return -EINVAL;
2052         }
2053         session->ctx_pool = internals->ctx_pool;
2054         rte_spinlock_lock(&internals->lock);
2055         session->inq = dpaa_sec_attach_rxq(internals);
2056         rte_spinlock_unlock(&internals->lock);
2057         if (session->inq == NULL) {
2058                 DPAA_SEC_ERR("unable to attach sec queue");
2059                 goto err1;
2060         }
2061
2062         return 0;
2063
2064 err1:
2065         rte_free(session->cipher_key.data);
2066         rte_free(session->auth_key.data);
2067         memset(session, 0, sizeof(dpaa_sec_session));
2068
2069         return -EINVAL;
2070 }
2071
2072 static int
2073 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2074                 struct rte_crypto_sym_xform *xform,
2075                 struct rte_cryptodev_sym_session *sess,
2076                 struct rte_mempool *mempool)
2077 {
2078         void *sess_private_data;
2079         int ret;
2080
2081         PMD_INIT_FUNC_TRACE();
2082
2083         if (rte_mempool_get(mempool, &sess_private_data)) {
2084                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2085                 return -ENOMEM;
2086         }
2087
2088         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2089         if (ret != 0) {
2090                 DPAA_SEC_ERR("failed to configure session parameters");
2091
2092                 /* Return session to mempool */
2093                 rte_mempool_put(mempool, sess_private_data);
2094                 return ret;
2095         }
2096
2097         set_sym_session_private_data(sess, dev->driver_id,
2098                         sess_private_data);
2099
2100
2101         return 0;
2102 }
2103
2104 /** Clear the memory of session so it doesn't leave key material behind */
2105 static void
2106 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2107                 struct rte_cryptodev_sym_session *sess)
2108 {
2109         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2110         uint8_t index = dev->driver_id;
2111         void *sess_priv = get_sym_session_private_data(sess, index);
2112
2113         PMD_INIT_FUNC_TRACE();
2114
2115         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2116
2117         if (sess_priv) {
2118                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2119
2120                 if (s->inq)
2121                         dpaa_sec_detach_rxq(qi, s->inq);
2122                 rte_free(s->cipher_key.data);
2123                 rte_free(s->auth_key.data);
2124                 memset(s, 0, sizeof(dpaa_sec_session));
2125                 set_sym_session_private_data(sess, index, NULL);
2126                 rte_mempool_put(sess_mp, sess_priv);
2127         }
2128 }
2129
2130 static int
2131 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2132                            struct rte_security_session_conf *conf,
2133                            void *sess)
2134 {
2135         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2136         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2137         struct rte_crypto_auth_xform *auth_xform = NULL;
2138         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2139         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2140
2141         PMD_INIT_FUNC_TRACE();
2142
2143         memset(session, 0, sizeof(dpaa_sec_session));
2144         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2145                 cipher_xform = &conf->crypto_xform->cipher;
2146                 if (conf->crypto_xform->next)
2147                         auth_xform = &conf->crypto_xform->next->auth;
2148         } else {
2149                 auth_xform = &conf->crypto_xform->auth;
2150                 if (conf->crypto_xform->next)
2151                         cipher_xform = &conf->crypto_xform->next->cipher;
2152         }
2153         session->proto_alg = conf->protocol;
2154
2155         if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2156                 session->cipher_key.data = rte_zmalloc(NULL,
2157                                                        cipher_xform->key.length,
2158                                                        RTE_CACHE_LINE_SIZE);
2159                 if (session->cipher_key.data == NULL &&
2160                                 cipher_xform->key.length > 0) {
2161                         DPAA_SEC_ERR("No Memory for cipher key");
2162                         return -ENOMEM;
2163                 }
2164                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2165                                 cipher_xform->key.length);
2166                 session->cipher_key.length = cipher_xform->key.length;
2167
2168                 switch (cipher_xform->algo) {
2169                 case RTE_CRYPTO_CIPHER_AES_CBC:
2170                 case RTE_CRYPTO_CIPHER_3DES_CBC:
2171                 case RTE_CRYPTO_CIPHER_AES_CTR:
2172                         break;
2173                 default:
2174                         DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2175                                 cipher_xform->algo);
2176                         goto out;
2177                 }
2178                 session->cipher_alg = cipher_xform->algo;
2179         } else {
2180                 session->cipher_key.data = NULL;
2181                 session->cipher_key.length = 0;
2182                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2183         }
2184
2185         if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2186                 session->auth_key.data = rte_zmalloc(NULL,
2187                                                 auth_xform->key.length,
2188                                                 RTE_CACHE_LINE_SIZE);
2189                 if (session->auth_key.data == NULL &&
2190                                 auth_xform->key.length > 0) {
2191                         DPAA_SEC_ERR("No Memory for auth key");
2192                         rte_free(session->cipher_key.data);
2193                         return -ENOMEM;
2194                 }
2195                 memcpy(session->auth_key.data, auth_xform->key.data,
2196                                 auth_xform->key.length);
2197                 session->auth_key.length = auth_xform->key.length;
2198
2199                 switch (auth_xform->algo) {
2200                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2201                 case RTE_CRYPTO_AUTH_MD5_HMAC:
2202                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2203                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2204                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2205                 case RTE_CRYPTO_AUTH_AES_CMAC:
2206                         break;
2207                 default:
2208                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2209                                 auth_xform->algo);
2210                         goto out;
2211                 }
2212                 session->auth_alg = auth_xform->algo;
2213         } else {
2214                 session->auth_key.data = NULL;
2215                 session->auth_key.length = 0;
2216                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2217         }
2218
2219         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2220                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2221                                 sizeof(session->ip4_hdr));
2222                 session->ip4_hdr.ip_v = IPVERSION;
2223                 session->ip4_hdr.ip_hl = 5;
2224                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2225                                                 sizeof(session->ip4_hdr));
2226                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2227                 session->ip4_hdr.ip_id = 0;
2228                 session->ip4_hdr.ip_off = 0;
2229                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2230                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2231                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2232                                 : IPPROTO_AH;
2233                 session->ip4_hdr.ip_sum = 0;
2234                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2235                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2236                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2237                                                 (void *)&session->ip4_hdr,
2238                                                 sizeof(struct ip));
2239
2240                 session->encap_pdb.options =
2241                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2242                         PDBOPTS_ESP_OIHI_PDB_INL |
2243                         PDBOPTS_ESP_IVSRC |
2244                         PDBHMO_ESP_ENCAP_DTTL |
2245                         PDBHMO_ESP_SNR;
2246                 session->encap_pdb.spi = ipsec_xform->spi;
2247                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2248
2249                 session->dir = DIR_ENC;
2250         } else if (ipsec_xform->direction ==
2251                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2252                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2253                 session->decap_pdb.options = sizeof(struct ip) << 16;
2254                 session->dir = DIR_DEC;
2255         } else
2256                 goto out;
2257         session->ctx_pool = internals->ctx_pool;
2258         rte_spinlock_lock(&internals->lock);
2259         session->inq = dpaa_sec_attach_rxq(internals);
2260         rte_spinlock_unlock(&internals->lock);
2261         if (session->inq == NULL) {
2262                 DPAA_SEC_ERR("unable to attach sec queue");
2263                 goto out;
2264         }
2265
2266         return 0;
2267 out:
2268         rte_free(session->auth_key.data);
2269         rte_free(session->cipher_key.data);
2270         memset(session, 0, sizeof(dpaa_sec_session));
2271         return -1;
2272 }
2273
/* Build a PDCP (lookaside protocol) session from a security session
 * config: pick cipher/auth xforms, copy keys, and record the PDCP
 * parameters (domain, bearer, direction, SN size, HFN). Returns 0 on
 * success, negative on failure (session wiped, keys freed).
 */
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	if (cipher_xform) {
		/* Copy the cipher key into driver-owned memory. */
		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		/* NULL cipher: no key, direction defaults to encrypt. */
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	/* Auth is only applicable for control mode operation. */
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5 bits for cmode");
			goto out;
		}
		if (auth_xform) {
			session->auth_key.data = rte_zmalloc(NULL,
							auth_xform->key.length,
							RTE_CACHE_LINE_SIZE);
			if (session->auth_key.data == NULL &&
					auth_xform->key.length > 0) {
				DPAA_SEC_ERR("No Memory for auth key");
				/* Undo the cipher key allocation above. */
				rte_free(session->cipher_key.data);
				return -ENOMEM;
			}
			session->auth_key.length = auth_xform->key.length;
			memcpy(session->auth_key.data, auth_xform->key.data,
					auth_xform->key.length);
			session->auth_alg = auth_xform->algo;
		} else {
			session->auth_key.data = NULL;
			session->auth_key.length = 0;
			session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		}
	}
	/* Record PDCP parameters used when building the descriptor. */
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
#ifdef ENABLE_HFN_OVERRIDE
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
#endif
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;

	session->ctx_pool = dev_priv->ctx_pool;
	/* Input queue allocation is shared across sessions; serialize it. */
	rte_spinlock_lock(&dev_priv->lock);
	session->inq = dpaa_sec_attach_rxq(dev_priv);
	rte_spinlock_unlock(&dev_priv->lock);
	if (session->inq == NULL) {
		DPAA_SEC_ERR("unable to attach sec queue");
		goto out;
	}
	return 0;
out:
	/* rte_free(NULL) is a no-op, so partial failures are safe here. */
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
2379
2380 static int
2381 dpaa_sec_security_session_create(void *dev,
2382                                  struct rte_security_session_conf *conf,
2383                                  struct rte_security_session *sess,
2384                                  struct rte_mempool *mempool)
2385 {
2386         void *sess_private_data;
2387         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2388         int ret;
2389
2390         if (rte_mempool_get(mempool, &sess_private_data)) {
2391                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2392                 return -ENOMEM;
2393         }
2394
2395         switch (conf->protocol) {
2396         case RTE_SECURITY_PROTOCOL_IPSEC:
2397                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2398                                 sess_private_data);
2399                 break;
2400         case RTE_SECURITY_PROTOCOL_PDCP:
2401                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
2402                                 sess_private_data);
2403                 break;
2404         case RTE_SECURITY_PROTOCOL_MACSEC:
2405                 return -ENOTSUP;
2406         default:
2407                 return -EINVAL;
2408         }
2409         if (ret != 0) {
2410                 DPAA_SEC_ERR("failed to configure session parameters");
2411                 /* Return session to mempool */
2412                 rte_mempool_put(mempool, sess_private_data);
2413                 return ret;
2414         }
2415
2416         set_sec_session_private_data(sess, sess_private_data);
2417
2418         return ret;
2419 }
2420
2421 /** Clear the memory of session so it doesn't leave key material behind */
2422 static int
2423 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2424                 struct rte_security_session *sess)
2425 {
2426         PMD_INIT_FUNC_TRACE();
2427         void *sess_priv = get_sec_session_private_data(sess);
2428
2429         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2430
2431         if (sess_priv) {
2432                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2433
2434                 rte_free(s->cipher_key.data);
2435                 rte_free(s->auth_key.data);
2436                 memset(sess, 0, sizeof(dpaa_sec_session));
2437                 set_sec_session_private_data(sess, NULL);
2438                 rte_mempool_put(sess_mp, sess_priv);
2439         }
2440         return 0;
2441 }
2442
2443
2444 static int
2445 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2446                        struct rte_cryptodev_config *config __rte_unused)
2447 {
2448
2449         char str[20];
2450         struct dpaa_sec_dev_private *internals;
2451
2452         PMD_INIT_FUNC_TRACE();
2453
2454         internals = dev->data->dev_private;
2455         sprintf(str, "ctx_pool_%d", dev->data->dev_id);
2456         if (!internals->ctx_pool) {
2457                 internals->ctx_pool = rte_mempool_create((const char *)str,
2458                                                         CTX_POOL_NUM_BUFS,
2459                                                         CTX_POOL_BUF_SIZE,
2460                                                         CTX_POOL_CACHE_SIZE, 0,
2461                                                         NULL, NULL, NULL, NULL,
2462                                                         SOCKET_ID_ANY, 0);
2463                 if (!internals->ctx_pool) {
2464                         DPAA_SEC_ERR("%s create failed\n", str);
2465                         return -ENOMEM;
2466                 }
2467         } else
2468                 DPAA_SEC_INFO("mempool already created for dev_id : %d",
2469                                 dev->data->dev_id);
2470
2471         return 0;
2472 }
2473
2474 static int
2475 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2476 {
2477         PMD_INIT_FUNC_TRACE();
2478         return 0;
2479 }
2480
2481 static void
2482 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2483 {
2484         PMD_INIT_FUNC_TRACE();
2485 }
2486
2487 static int
2488 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2489 {
2490         struct dpaa_sec_dev_private *internals;
2491
2492         PMD_INIT_FUNC_TRACE();
2493
2494         if (dev == NULL)
2495                 return -ENOMEM;
2496
2497         internals = dev->data->dev_private;
2498         rte_mempool_free(internals->ctx_pool);
2499         internals->ctx_pool = NULL;
2500
2501         return 0;
2502 }
2503
2504 static void
2505 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2506                        struct rte_cryptodev_info *info)
2507 {
2508         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2509
2510         PMD_INIT_FUNC_TRACE();
2511         if (info != NULL) {
2512                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2513                 info->feature_flags = dev->feature_flags;
2514                 info->capabilities = dpaa_sec_capabilities;
2515                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2516                 info->driver_id = cryptodev_driver_id;
2517         }
2518 }
2519
/* Cryptodev operations table exported to the generic cryptodev layer. */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa_sec_dev_configure,
	.dev_start            = dpaa_sec_dev_start,
	.dev_stop             = dpaa_sec_dev_stop,
	.dev_close            = dpaa_sec_dev_close,
	.dev_infos_get        = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.queue_pair_count     = dpaa_sec_queue_pair_count,
	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
	.sym_session_configure    = dpaa_sec_sym_session_configure,
	.sym_session_clear        = dpaa_sec_sym_session_clear
};
2533
2534 static const struct rte_security_capability *
2535 dpaa_sec_capabilities_get(void *device __rte_unused)
2536 {
2537         return dpaa_sec_security_cap;
2538 }
2539
/* rte_security operations: only session create/destroy and capability
 * query are implemented; update, stats and inline metadata are not
 * supported by this PMD.
 */
static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
2548
2549 static int
2550 dpaa_sec_uninit(struct rte_cryptodev *dev)
2551 {
2552         struct dpaa_sec_dev_private *internals;
2553
2554         if (dev == NULL)
2555                 return -ENODEV;
2556
2557         internals = dev->data->dev_private;
2558         rte_free(dev->security_ctx);
2559
2560         /* In case close has been called, internals->ctx_pool would be NULL */
2561         rte_mempool_free(internals->ctx_pool);
2562         rte_free(internals);
2563
2564         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2565                       dev->data->name, rte_socket_id());
2566
2567         return 0;
2568 }
2569
/* One-time device initialization: hook up burst ops and feature flags,
 * then (primary process only) allocate the rte_security context, init the
 * TX frame queues for every queue pair and pre-create the per-session RX
 * frame queues.  On any failure all partially-created state is released
 * via dpaa_sec_uninit().
 */
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	/* Lock guards dpaa_sec_attach_rxq()/detach at session setup time */
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair  %d", i);
			goto init_error;
		}
	}

	/* Dynamic FQIDs, locked to the portal, delivering to the SEC DCP */
	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	/* Releases security_ctx and dev_private allocated above */
	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
2651
2652 static int
2653 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
2654                                 struct rte_dpaa_device *dpaa_dev)
2655 {
2656         struct rte_cryptodev *cryptodev;
2657         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2658
2659         int retval;
2660
2661         sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);
2662
2663         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2664         if (cryptodev == NULL)
2665                 return -ENOMEM;
2666
2667         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2668                 cryptodev->data->dev_private = rte_zmalloc_socket(
2669                                         "cryptodev private structure",
2670                                         sizeof(struct dpaa_sec_dev_private),
2671                                         RTE_CACHE_LINE_SIZE,
2672                                         rte_socket_id());
2673
2674                 if (cryptodev->data->dev_private == NULL)
2675                         rte_panic("Cannot allocate memzone for private "
2676                                         "device data");
2677         }
2678
2679         dpaa_dev->crypto_dev = cryptodev;
2680         cryptodev->device = &dpaa_dev->device;
2681
2682         /* init user callbacks */
2683         TAILQ_INIT(&(cryptodev->link_intr_cbs));
2684
2685         /* if sec device version is not configured */
2686         if (!rta_get_sec_era()) {
2687                 const struct device_node *caam_node;
2688
2689                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2690                         const uint32_t *prop = of_get_property(caam_node,
2691                                         "fsl,sec-era",
2692                                         NULL);
2693                         if (prop) {
2694                                 rta_set_sec_era(
2695                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2696                                 break;
2697                         }
2698                 }
2699         }
2700
2701         /* Invoke PMD device initialization function */
2702         retval = dpaa_sec_dev_init(cryptodev);
2703         if (retval == 0)
2704                 return 0;
2705
2706         /* In case of error, cleanup is done */
2707         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2708                 rte_free(cryptodev->data->dev_private);
2709
2710         rte_cryptodev_pmd_release_device(cryptodev);
2711
2712         return -ENXIO;
2713 }
2714
2715 static int
2716 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2717 {
2718         struct rte_cryptodev *cryptodev;
2719         int ret;
2720
2721         cryptodev = dpaa_dev->crypto_dev;
2722         if (cryptodev == NULL)
2723                 return -ENODEV;
2724
2725         ret = dpaa_sec_uninit(cryptodev);
2726         if (ret)
2727                 return ret;
2728
2729         return rte_cryptodev_pmd_destroy(cryptodev);
2730 }
2731
/* DPAA bus driver descriptor binding this PMD to FSL_DPAA_CRYPTO devices. */
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};
2740
/* Register the driver with the DPAA bus and with the cryptodev layer
 * (the latter assigns cryptodev_driver_id used in dev_infos_get).
 */
static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
2746
2747 RTE_INIT(dpaa_sec_init_log)
2748 {
2749         dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2750         if (dpaa_logtype_sec >= 0)
2751                 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2752 }