8305f19a312e4be8266bab0fffaa07eebf8e6b6c
[dpdk.git] / drivers / crypto / dpaa_sec / dpaa_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2019 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
27
28 #include <fsl_usd.h>
29 #include <fsl_qman.h>
30 #include <of.h>
31
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
36 #include <hw/desc/pdcp.h>
37
38 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec.h>
40 #include <dpaa_sec_log.h>
41
/* SEC block era as reported by RTA; consumed by the descriptor-
 * construction library. Set during device probe (outside this view).
 */
enum rta_sec_era rta_sec_era;

/* Logtype id backing the DPAA_SEC_* log macros. */
int dpaa_logtype_sec;

/* Driver id assigned by the cryptodev framework at PMD registration. */
static uint8_t cryptodev_driver_id;

/* Per-lcore staging area filled by the DQRR callback
 * (dqrr_out_fq_cb_rx) with completed crypto ops; presumably drained by
 * a poll-mode dequeue path outside this chunk — confirm against the
 * rest of the file.
 */
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
53
54 static inline void
55 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 {
57         if (!ctx->fd_status) {
58                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59         } else {
60                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
61                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
62         }
63
64         /* report op status to sym->op and then free the ctx memeory  */
65         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
66 }
67
/* Allocate and prepare a per-operation context from the session's
 * context pool. Returns NULL on pool exhaustion.
 */
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
	 * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, memset is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	/* Cache the virtual-to-IOVA delta of this ctx so addresses inside
	 * it can be converted with a subtraction instead of a lookup.
	 */
	ctx->vtop_offset = (size_t) ctx
				- rte_mempool_virt2iova(ctx);

	return ctx;
}
96
97 static inline rte_iova_t
98 dpaa_mem_vtop(void *vaddr)
99 {
100         const struct rte_memseg *ms;
101
102         ms = rte_mem_virt2memseg(vaddr, NULL);
103         if (ms)
104                 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
105         return (size_t)NULL;
106 }
107
108 static inline void *
109 dpaa_mem_ptov(rte_iova_t paddr)
110 {
111         void *va;
112
113         va = (void *)dpaax_iova_table_get_va(paddr);
114         if (likely(va))
115                 return va;
116
117         return rte_mem_iova2virt(paddr);
118 }
119
/* Enqueue-rejection (ERN) callback for SEC frame queues: QMan rejected
 * a frame, so log the rejection code and sequence number.
 * NOTE(review): the rejected frame's context is not reclaimed here —
 * confirm recovery is handled elsewhere.
 */
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
128
/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue could be dispatched into caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	/* Schedule the FQ immediately; program dest work queue plus the
	 * two context words below.
	 */
	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	/* Context A carries the shared-descriptor address for CAAM;
	 * context B names the FQ that receives the results.
	 */
	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
162
/* something is put into in_fq and caam put the crypto result into out_fq */
/* DQRR callback on the result FQ: collects finished ops into the
 * per-lcore dpaa_sec_ops[] array and finalizes their status.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	/* Staging array full: ask QMan to hold this entry for later. */
	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		/* Protocol offload can change the packet size; refresh
		 * the mbuf lengths from the output SG entry.
		 */
		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
203
/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	/* Result FQ: never enqueued to by software, locked to this
	 * portal, with a dynamically allocated FQID.
	 */
	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	/* NOTE(review): CONTEXTA/CONTEXTB are in the write-enable mask
	 * but the fqd context fields are left zeroed — presumably the
	 * zero defaults are intended; confirm.
	 */
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}
238
239 static inline int is_cipher_only(dpaa_sec_session *ses)
240 {
241         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
242                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
243 }
244
245 static inline int is_auth_only(dpaa_sec_session *ses)
246 {
247         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
248                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
249 }
250
251 static inline int is_aead(dpaa_sec_session *ses)
252 {
253         return ((ses->cipher_alg == 0) &&
254                 (ses->auth_alg == 0) &&
255                 (ses->aead_alg != 0));
256 }
257
258 static inline int is_auth_cipher(dpaa_sec_session *ses)
259 {
260         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
261                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
262                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
263 }
264
/* Session is an rte_security IPsec protocol-offload session. */
static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}
269
/* Session is an rte_security PDCP protocol-offload session. */
static inline int is_proto_pdcp(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
}
274
/* Session direction is encrypt/encapsulate. */
static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}
279
/* Session direction is decrypt/decapsulate. */
static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
284
/* Fill @alginfo_a with the CAAM selector/mode for the session's auth
 * algorithm. IPsec protocol sessions use the PCL protocol selectors;
 * plain crypto sessions use the raw class-2 ALGSEL values.
 * On an unsupported algorithm only an error is logged; algtype keeps
 * whatever the caller initialized it to (callers zero the struct and
 * then compare against DPAA_SEC_ALG_UNSUPPORT — presumably the zeroed
 * value; confirm the macro's definition).
 */
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_NULL : 0;
		/* NULL auth produces no digest */
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		/* NOTE(review): for IPsec this maps SHA224 to
		 * OP_PCL_IPSEC_HMAC_SHA1_160 — verify this is the
		 * intended PCL selector for HMAC-SHA224.
		 */
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
	}
}
335
/* Fill @alginfo_c with the CAAM selector/mode for the session's
 * cipher. IPsec protocol sessions use the PCL protocol selectors;
 * plain crypto sessions use the raw class-1 ALGSEL values.
 * Unsupported algorithms only log an error and leave algtype as the
 * caller initialized it (callers zero the struct beforehand).
 */
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_NULL : 0;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
	}
}
367
368 static inline void
369 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
370 {
371         switch (ses->aead_alg) {
372         case RTE_CRYPTO_AEAD_AES_GCM:
373                 alginfo->algtype = OP_ALG_ALGSEL_AES;
374                 alginfo->algmode = OP_ALG_AAI_GCM;
375                 break;
376         default:
377                 DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
378         }
379 }
380
/* Build the shared descriptor for a PDCP protocol-offload session in
 * ses->cdb. Control-plane sessions carry cipher + integrity; user-
 * plane sessions carry cipher only. Returns the descriptor length in
 * words, or a negative value on error.
 */
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
	/* Descriptor words must be byte-swapped on little-endian hosts
	 * to match the SEC engine's endianness.
	 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      ses->cipher_alg);
		return -1;
	}

	/* Start with the key embedded immediately in the descriptor;
	 * rta_inline_query below may demote it to a pointer reference.
	 */
	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		/* Control plane: integrity algorithm is required too. */
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				      ses->auth_alg);
			return -1;
		}

		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;

		/* Ask RTA whether both keys fit inline in the shared
		 * descriptor: sh_desc[0..1] pass the key lengths in,
		 * sh_desc[2] receives a bitmask (bit0 = cipher key,
		 * bit1 = auth key) of keys that can stay immediate.
		 */
		cdb->sh_desc[0] = cipherdata.keylen;
		cdb->sh_desc[1] = authdata.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		/* Keys that cannot be inlined are referenced by IOVA. */
		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
			cipherdata.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		if (!(cdb->sh_desc[2] & (1<<1)) &&  authdata.keylen) {
			authdata.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;
		}

		/* Scratch words consumed; clear before real descriptor
		 * construction starts at sh_desc[0].
		 */
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		/* User plane: cipher only, same inline-query dance with
		 * a single key.
		 */
		cdb->sh_desc[0] = cipherdata.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 1);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
			cipherdata.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, 0);
	}

	return shared_desc_len;
}
529
/* prepare ipsec proto command block of the session */
/* Builds the shared descriptor for an IPsec protocol-offload session
 * in ses->cdb. Returns the descriptor length in words, or a negative
 * errno-style value on error.
 */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
	/* Swap descriptor words on little-endian hosts for the SEC
	 * engine.
	 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	caam_cipher_alg(ses, &cipherdata);
	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported cipher alg");
		return -ENOTSUP;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	caam_auth_alg(ses, &authdata);
	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported auth alg");
		return -ENOTSUP;
	}

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	/* Query RTA whether the keys fit inline: key lengths go in via
	 * sh_desc[0..1]; sh_desc[2] returns a bitmask (bit0 = cipher,
	 * bit1 = auth) of keys that may stay immediate.
	 */
	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		/* Too big to inline: reference the key by IOVA. */
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	/* Clear the scratch words before descriptor construction. */
	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
611
/* prepare command block of the session */
/* Dispatches to the proper shared-descriptor builder based on the
 * session type (IPsec, PDCP, cipher-only, auth-only, AEAD, or chained
 * cipher+auth) and finalizes the CDB header. Returns 0 on success or
 * a negative value on error.
 */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
	/* Descriptor words are swapped on little-endian hosts. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_proto_ipsec(ses)) {
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
	} else if (is_proto_pdcp(ses)) {
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
	} else if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, SHR_NEVER, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		/* Chained cipher + auth session. */
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		/* Ask RTA whether both keys fit inline (sh_desc[0..1]
		 * carry key lengths in; sh_desc[2] returns the bitmask:
		 * bit0 = cipher, bit1 = auth).
		 */
		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			/* Key too large to inline — pass by IOVA. */
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		/* Clear the scratch words used by the inline query. */
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length, 0,
				ses->digest_length, ses->dir);
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	/* Record the descriptor length and convert the CDB header words
	 * to the big-endian layout the SEC block expects.
	 */
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
760
/* qp is lockless, should be accessed by only one thread */
/* Volatile-dequeue up to nb_ops completed operations from the queue
 * pair's output FQ into ops[]. Returns the number of ops dequeued.
 */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * Until request for four buffers, we provide exact number of buffers.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
	 * requested, so we request two less in this case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	/* Issue the volatile dequeue command; a non-zero return means a
	 * VDQCR is already outstanding, so report nothing dequeued.
	 */
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	/* Spin until the volatile dequeue command completes (the
	 * VDQCR-in-progress flag clears), consuming each DQRR entry.
	 */
	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;

			/* Protocol offload can change packet size; pick
			 * the final length up from the output SG entry.
			 */
			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
835
/*
 * Build a SEC job (compound frame) for an auth-only operation on a
 * scatter-gather (multi-segment) mbuf.
 *
 * Frame layout:
 *   sg[0]  - output: the digest buffer
 *   sg[1]  - input: extension entry pointing at sg[2..]
 *   sg[2..]- one entry per mbuf segment; on decode (verify) an extra
 *            entry with a copy of the expected digest is appended so
 *            the hardware performs the comparison.
 *
 * Returns the job, or NULL if the required SG entries exceed
 * MAX_SG_ENTRIES or no op context could be allocated.
 */
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	/* Entries needed besides the data segments: output + input
	 * extension, plus one more for the digest copy when verifying.
	 */
	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case: feed a copy of the expected
		 * digest to the hardware after the data segments.
		 */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
				ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case: the trailing digest room in the
		 * last segment is not part of the authenticated data.
		 */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
915
916 /**
917  * packet looks like:
918  *              |<----data_len------->|
919  *    |ip_header|ah_header|icv|payload|
920  *              ^
921  *              |
922  *         mbuf->pkt.data
923  */
924 static inline struct dpaa_sec_job *
925 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
926 {
927         struct rte_crypto_sym_op *sym = op->sym;
928         struct rte_mbuf *mbuf = sym->m_src;
929         struct dpaa_sec_job *cf;
930         struct dpaa_sec_op_ctx *ctx;
931         struct qm_sg_entry *sg;
932         rte_iova_t start_addr;
933         uint8_t *old_digest;
934
935         ctx = dpaa_sec_alloc_ctx(ses);
936         if (!ctx)
937                 return NULL;
938
939         cf = &ctx->job;
940         ctx->op = op;
941         old_digest = ctx->digest;
942
943         start_addr = rte_pktmbuf_iova(mbuf);
944         /* output */
945         sg = &cf->sg[0];
946         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
947         sg->length = ses->digest_length;
948         cpu_to_hw_sg(sg);
949
950         /* input */
951         sg = &cf->sg[1];
952         if (is_decode(ses)) {
953                 /* need to extend the input to a compound frame */
954                 sg->extension = 1;
955                 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
956                 sg->length = sym->auth.data.length + ses->digest_length;
957                 sg->final = 1;
958                 cpu_to_hw_sg(sg);
959
960                 sg = &cf->sg[2];
961                 /* hash result or digest, save digest first */
962                 rte_memcpy(old_digest, sym->auth.digest.data,
963                            ses->digest_length);
964                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
965                 sg->length = sym->auth.data.length;
966                 cpu_to_hw_sg(sg);
967
968                 /* let's check digest by hw */
969                 start_addr = dpaa_mem_vtop(old_digest);
970                 sg++;
971                 qm_sg_entry_set64(sg, start_addr);
972                 sg->length = ses->digest_length;
973                 sg->final = 1;
974                 cpu_to_hw_sg(sg);
975         } else {
976                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
977                 sg->length = sym->auth.data.length;
978                 sg->final = 1;
979                 cpu_to_hw_sg(sg);
980         }
981
982         return cf;
983 }
984
/*
 * Build a SEC job (compound frame) for a cipher-only operation on
 * scatter-gather mbufs, with optional out-of-place output via m_dst.
 *
 * Frame layout:
 *   sg[0]  - output extension entry -> output data segments
 *   sg[1]  - input extension entry  -> IV segment + input data segments
 *
 * Returns the job, or NULL if the required SG entries exceed
 * MAX_SG_ENTRIES or no op context could be allocated.
 */
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	/* 3 extra entries: out ext, in ext, and the IV segment. For
	 * in-place the source list is walked twice (output and input).
	 */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	/* Input segment list starts right after the last output entry. */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
1079
1080 static inline struct dpaa_sec_job *
1081 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1082 {
1083         struct rte_crypto_sym_op *sym = op->sym;
1084         struct dpaa_sec_job *cf;
1085         struct dpaa_sec_op_ctx *ctx;
1086         struct qm_sg_entry *sg;
1087         rte_iova_t src_start_addr, dst_start_addr;
1088         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1089                         ses->iv.offset);
1090
1091         ctx = dpaa_sec_alloc_ctx(ses);
1092         if (!ctx)
1093                 return NULL;
1094
1095         cf = &ctx->job;
1096         ctx->op = op;
1097
1098         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1099
1100         if (sym->m_dst)
1101                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1102         else
1103                 dst_start_addr = src_start_addr;
1104
1105         /* output */
1106         sg = &cf->sg[0];
1107         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1108         sg->length = sym->cipher.data.length + ses->iv.length;
1109         cpu_to_hw_sg(sg);
1110
1111         /* input */
1112         sg = &cf->sg[1];
1113
1114         /* need to extend the input to a compound frame */
1115         sg->extension = 1;
1116         sg->final = 1;
1117         sg->length = sym->cipher.data.length + ses->iv.length;
1118         qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1119         cpu_to_hw_sg(sg);
1120
1121         sg = &cf->sg[2];
1122         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1123         sg->length = ses->iv.length;
1124         cpu_to_hw_sg(sg);
1125
1126         sg++;
1127         qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
1128         sg->length = sym->cipher.data.length;
1129         sg->final = 1;
1130         cpu_to_hw_sg(sg);
1131
1132         return cf;
1133 }
1134
/*
 * Build a SEC job (compound frame) for an AEAD (e.g. AES-GCM)
 * operation on scatter-gather mbufs, optionally out-of-place via
 * m_dst.
 *
 * Frame layout:
 *   sg[0] - output extension entry -> output segments
 *           (+ digest entry on encrypt)
 *   sg[1] - input extension entry  -> IV [+ AAD] + payload segments
 *           (+ saved copy of the expected digest on decrypt)
 *
 * Returns the job, or NULL if the required SG entries exceed
 * MAX_SG_ENTRIES or no op context could be allocated.
 */
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	/* 4 extra entries: out ext, in ext, IV and digest. In-place
	 * walks the source list twice (output and input).
	 */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	/* One more entry for the out-of-band AAD, when present. */
	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg: output window starts auth_only_len bytes before the
	 * AEAD data offset.
	 */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
					ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Trim the digest room from the last data segment; the digest is
	 * handled through its own entry.
	 */
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Append a saved copy of the expected digest so the
		 * hardware verifies it.
		 */
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
1273
/*
 * Build a SEC job for an AEAD (e.g. AES-GCM) operation on contiguous
 * mbufs, optionally out-of-place via m_dst.
 *
 * The input compound frame (cf->sg[1] -> sg[2..]) carries
 * IV [+ AAD] + payload, plus a saved copy of the expected digest on
 * decrypt. The output compound frame (cf->sg[0]) covers the
 * destination window, plus the digest buffer on encrypt.
 */
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		/* encrypt: IV [+ AAD] + plaintext */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		/* decrypt: IV [+ AAD] + ciphertext + saved digest copy
		 * so the hardware verifies it.
		 */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output: window starts auth_only_len bytes before the AEAD data
	 * offset in the destination buffer.
	 */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
1385
/*
 * Build a SEC job (compound frame) for a chained cipher+auth operation
 * (e.g. AES-CBC + HMAC) on scatter-gather mbufs, optionally
 * out-of-place via m_dst.
 *
 * Frame layout:
 *   sg[0] - output extension entry -> output segments
 *           (+ digest entry on encode)
 *   sg[1] - input extension entry  -> IV + data segments
 *           (+ saved copy of the expected digest on decode)
 *
 * Returns the job, or NULL if the required SG entries exceed
 * MAX_SG_ENTRIES or no op context could be allocated.
 */
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	/* 4 extra entries: out ext, in ext, IV and digest. In-place
	 * walks the source list twice (output and input).
	 */
	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Trim the digest room from the last data segment; the digest is
	 * handled through its own entry.
	 */
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		/* Append a saved copy of the expected digest so the
		 * hardware verifies it.
		 */
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
1511
/*
 * Build a SEC job for a chained cipher+auth operation (e.g. AES-CBC +
 * HMAC) on contiguous mbufs, optionally out-of-place via m_dst.
 *
 * The input compound frame (cf->sg[1] -> sg[2..]) carries IV + the
 * auth region, plus a saved copy of the expected digest on decode.
 * The output compound frame (cf->sg[0]) covers the cipher region,
 * plus the digest buffer on encode.
 */
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		/* encode: IV + auth region */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		/* decode: IV + auth region + saved digest copy so the
		 * hardware verifies it.
		 */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
1606
1607 static inline struct dpaa_sec_job *
1608 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1609 {
1610         struct rte_crypto_sym_op *sym = op->sym;
1611         struct dpaa_sec_job *cf;
1612         struct dpaa_sec_op_ctx *ctx;
1613         struct qm_sg_entry *sg;
1614         phys_addr_t src_start_addr, dst_start_addr;
1615
1616         ctx = dpaa_sec_alloc_ctx(ses);
1617         if (!ctx)
1618                 return NULL;
1619         cf = &ctx->job;
1620         ctx->op = op;
1621
1622         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1623
1624         if (sym->m_dst)
1625                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1626         else
1627                 dst_start_addr = src_start_addr;
1628
1629         /* input */
1630         sg = &cf->sg[1];
1631         qm_sg_entry_set64(sg, src_start_addr);
1632         sg->length = sym->m_src->pkt_len;
1633         sg->final = 1;
1634         cpu_to_hw_sg(sg);
1635
1636         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1637         /* output */
1638         sg = &cf->sg[0];
1639         qm_sg_entry_set64(sg, dst_start_addr);
1640         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1641         cpu_to_hw_sg(sg);
1642
1643         return cf;
1644 }
1645
/*
 * Enqueue a burst of crypto operations to the SEC block through the
 * QMan frame queues bound to this queue pair.
 *
 * For each op: resolve the driver session, lazily bind the session to
 * this lcore's queue pair, build a compound frame descriptor via the
 * builder matching the session type, then ring the burst out with
 * qman_enqueue_multi_fq(). On any per-op failure the burst is truncated
 * at that op and whatever was already built is still sent.
 *
 * Returns the number of operations actually enqueued (<= nb_ops).
 */
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                       uint16_t nb_ops)
{
        /* Function to transmit the frames to given device and queuepair */
        uint32_t loop;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
        uint16_t num_tx = 0;
        struct qm_fd fds[DPAA_SEC_BURST], *fd;
        uint32_t frames_to_send;
        struct rte_crypto_op *op;
        struct dpaa_sec_job *cf;
        dpaa_sec_session *ses;
        uint32_t auth_only_len;
        struct qman_fq *inq[DPAA_SEC_BURST];

        while (nb_ops) {
                /* Build at most DPAA_SEC_BURST descriptors per pass */
                frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
                                DPAA_SEC_BURST : nb_ops;
                for (loop = 0; loop < frames_to_send; loop++) {
                        op = *(ops++);
                        /* Resolve the driver-private session for this op */
                        switch (op->sess_type) {
                        case RTE_CRYPTO_OP_WITH_SESSION:
                                ses = (dpaa_sec_session *)
                                        get_sym_session_private_data(
                                                        op->sym->session,
                                                        cryptodev_driver_id);
                                break;
                        case RTE_CRYPTO_OP_SECURITY_SESSION:
                                ses = (dpaa_sec_session *)
                                        get_sec_session_private_data(
                                                        op->sym->sec_session);
                                break;
                        default:
                                DPAA_SEC_DP_ERR(
                                        "sessionless crypto op not supported");
                                /* Truncate burst to what was built so far */
                                frames_to_send = loop;
                                nb_ops = loop;
                                goto send_pkts;
                        }
                        /* First use on this lcore: attach session to qp.
                         * A session must not migrate between queue pairs
                         * on the same lcore afterwards.
                         */
                        if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
                                if (dpaa_sec_attach_sess_q(qp, ses)) {
                                        frames_to_send = loop;
                                        nb_ops = loop;
                                        goto send_pkts;
                                }
                        } else if (unlikely(ses->qp[rte_lcore_id() %
                                                MAX_DPAA_CORES] != qp)) {
                                DPAA_SEC_DP_ERR("Old:sess->qp = %p"
                                        " New qp = %p\n",
                                        ses->qp[rte_lcore_id() %
                                        MAX_DPAA_CORES], qp);
                                frames_to_send = loop;
                                nb_ops = loop;
                                goto send_pkts;
                        }

                        auth_only_len = op->sym->auth.data.length -
                                                op->sym->cipher.data.length;
                        /* Pick the descriptor builder matching the session
                         * type; the _sg variants handle chained mbufs.
                         */
                        if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
                                if (is_proto_ipsec(ses)) {
                                        cf = build_proto(op, ses);
                                } else if (is_proto_pdcp(ses)) {
                                        cf = build_proto(op, ses);
                                } else if (is_auth_only(ses)) {
                                        cf = build_auth_only(op, ses);
                                } else if (is_cipher_only(ses)) {
                                        cf = build_cipher_only(op, ses);
                                } else if (is_aead(ses)) {
                                        cf = build_cipher_auth_gcm(op, ses);
                                        auth_only_len = ses->auth_only_len;
                                } else if (is_auth_cipher(ses)) {
                                        cf = build_cipher_auth(op, ses);
                                } else {
                                        DPAA_SEC_DP_ERR("not supported ops");
                                        frames_to_send = loop;
                                        nb_ops = loop;
                                        goto send_pkts;
                                }
                        } else {
                                if (is_auth_only(ses)) {
                                        cf = build_auth_only_sg(op, ses);
                                } else if (is_cipher_only(ses)) {
                                        cf = build_cipher_only_sg(op, ses);
                                } else if (is_aead(ses)) {
                                        cf = build_cipher_auth_gcm_sg(op, ses);
                                        auth_only_len = ses->auth_only_len;
                                } else if (is_auth_cipher(ses)) {
                                        cf = build_cipher_auth_sg(op, ses);
                                } else {
                                        DPAA_SEC_DP_ERR("not supported ops");
                                        frames_to_send = loop;
                                        nb_ops = loop;
                                        goto send_pkts;
                                }
                        }
                        if (unlikely(!cf)) {
                                frames_to_send = loop;
                                nb_ops = loop;
                                goto send_pkts;
                        }

                        /* Fill the compound frame descriptor: two SG
                         * entries (output then input) built by the
                         * builder above.
                         */
                        fd = &fds[loop];
                        inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
                        fd->opaque_addr = 0;
                        fd->cmd = 0;
                        qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
                        fd->_format1 = qm_fd_compound;
                        fd->length29 = 2 * sizeof(struct qm_sg_entry);
                        /* Auth_only_len is set as 0 in descriptor and it is
                         * overwritten here in the fd.cmd which will update
                         * the DPOVRD reg.
                         */
                        if (auth_only_len)
                                fd->cmd = 0x80000000 | auth_only_len;

                }
send_pkts:
                /* qman_enqueue_multi_fq() may send fewer frames than
                 * requested; retry until the whole burst is out.
                 */
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
                                        frames_to_send - loop);
                }
                nb_ops -= frames_to_send;
                num_tx += frames_to_send;
        }

        dpaa_qp->tx_pkts += num_tx;
        dpaa_qp->tx_errs += nb_ops - num_tx;

        return num_tx;
}
1778
1779 static uint16_t
1780 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1781                        uint16_t nb_ops)
1782 {
1783         uint16_t num_rx;
1784         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1785
1786         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1787
1788         dpaa_qp->rx_pkts += num_rx;
1789         dpaa_qp->rx_errs += nb_ops - num_rx;
1790
1791         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1792
1793         return num_rx;
1794 }
1795
1796 /** Release queue pair */
1797 static int
1798 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1799                             uint16_t qp_id)
1800 {
1801         struct dpaa_sec_dev_private *internals;
1802         struct dpaa_sec_qp *qp = NULL;
1803
1804         PMD_INIT_FUNC_TRACE();
1805
1806         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1807
1808         internals = dev->data->dev_private;
1809         if (qp_id >= internals->max_nb_queue_pairs) {
1810                 DPAA_SEC_ERR("Max supported qpid %d",
1811                              internals->max_nb_queue_pairs);
1812                 return -EINVAL;
1813         }
1814
1815         qp = &internals->qps[qp_id];
1816         qp->internals = NULL;
1817         dev->data->queue_pairs[qp_id] = NULL;
1818
1819         return 0;
1820 }
1821
1822 /** Setup a queue pair */
1823 static int
1824 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1825                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1826                 __rte_unused int socket_id)
1827 {
1828         struct dpaa_sec_dev_private *internals;
1829         struct dpaa_sec_qp *qp = NULL;
1830
1831         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1832
1833         internals = dev->data->dev_private;
1834         if (qp_id >= internals->max_nb_queue_pairs) {
1835                 DPAA_SEC_ERR("Max supported qpid %d",
1836                              internals->max_nb_queue_pairs);
1837                 return -EINVAL;
1838         }
1839
1840         qp = &internals->qps[qp_id];
1841         qp->internals = internals;
1842         dev->data->queue_pairs[qp_id] = qp;
1843
1844         return 0;
1845 }
1846
1847 /** Return the number of allocated queue pairs */
1848 static uint32_t
1849 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1850 {
1851         PMD_INIT_FUNC_TRACE();
1852
1853         return dev->data->nb_queue_pairs;
1854 }
1855
1856 /** Returns the size of session structure */
1857 static unsigned int
1858 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1859 {
1860         PMD_INIT_FUNC_TRACE();
1861
1862         return sizeof(dpaa_sec_session);
1863 }
1864
1865 static int
1866 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1867                      struct rte_crypto_sym_xform *xform,
1868                      dpaa_sec_session *session)
1869 {
1870         session->cipher_alg = xform->cipher.algo;
1871         session->iv.length = xform->cipher.iv.length;
1872         session->iv.offset = xform->cipher.iv.offset;
1873         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1874                                                RTE_CACHE_LINE_SIZE);
1875         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1876                 DPAA_SEC_ERR("No Memory for cipher key");
1877                 return -ENOMEM;
1878         }
1879         session->cipher_key.length = xform->cipher.key.length;
1880
1881         memcpy(session->cipher_key.data, xform->cipher.key.data,
1882                xform->cipher.key.length);
1883         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1884                         DIR_ENC : DIR_DEC;
1885
1886         return 0;
1887 }
1888
1889 static int
1890 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1891                    struct rte_crypto_sym_xform *xform,
1892                    dpaa_sec_session *session)
1893 {
1894         session->auth_alg = xform->auth.algo;
1895         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1896                                              RTE_CACHE_LINE_SIZE);
1897         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1898                 DPAA_SEC_ERR("No Memory for auth key");
1899                 return -ENOMEM;
1900         }
1901         session->auth_key.length = xform->auth.key.length;
1902         session->digest_length = xform->auth.digest_length;
1903
1904         memcpy(session->auth_key.data, xform->auth.key.data,
1905                xform->auth.key.length);
1906         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1907                         DIR_ENC : DIR_DEC;
1908
1909         return 0;
1910 }
1911
1912 static int
1913 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1914                    struct rte_crypto_sym_xform *xform,
1915                    dpaa_sec_session *session)
1916 {
1917         session->aead_alg = xform->aead.algo;
1918         session->iv.length = xform->aead.iv.length;
1919         session->iv.offset = xform->aead.iv.offset;
1920         session->auth_only_len = xform->aead.aad_length;
1921         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1922                                              RTE_CACHE_LINE_SIZE);
1923         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1924                 DPAA_SEC_ERR("No Memory for aead key\n");
1925                 return -ENOMEM;
1926         }
1927         session->aead_key.length = xform->aead.key.length;
1928         session->digest_length = xform->aead.digest_length;
1929
1930         memcpy(session->aead_key.data, xform->aead.key.data,
1931                xform->aead.key.length);
1932         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1933                         DIR_ENC : DIR_DEC;
1934
1935         return 0;
1936 }
1937
1938 static struct qman_fq *
1939 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1940 {
1941         unsigned int i;
1942
1943         for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
1944                 if (qi->inq_attach[i] == 0) {
1945                         qi->inq_attach[i] = 1;
1946                         return &qi->inq[i];
1947                 }
1948         }
1949         DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);
1950
1951         return NULL;
1952 }
1953
1954 static int
1955 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1956 {
1957         unsigned int i;
1958
1959         for (i = 0; i < qi->max_nb_sessions; i++) {
1960                 if (&qi->inq[i] == fq) {
1961                         qman_retire_fq(fq, NULL);
1962                         qman_oos_fq(fq);
1963                         qi->inq_attach[i] = 0;
1964                         return 0;
1965                 }
1966         }
1967         return -1;
1968 }
1969
/*
 * Bind a session to a queue pair for the calling lcore: record the qp,
 * prepare the SEC shared descriptor (CDB), and initialize the
 * session's per-core Rx frame queue to feed results into this qp's
 * output queue.
 *
 * Returns 0 on success, non-zero on any setup failure.
 */
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
        int ret;

        sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
        ret = dpaa_sec_prep_cdb(sess);
        if (ret) {
                DPAA_SEC_ERR("Unable to prepare sec cdb");
                return -1;
        }
        /* Lazily affine a DPAA portal to this thread; QMan enqueue and
         * dequeue require one per thread.
         */
        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_SEC_ERR("Failure in affining portal");
                        return ret;
                }
        }
        /* Program the session's Rx queue with the CDB physical address
         * and route completions to this qp's output queue.
         */
        ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
                               dpaa_mem_vtop(&sess->cdb),
                               qman_fq_fqid(&qp->outq));
        if (ret)
                DPAA_SEC_ERR("Unable to init sec queue");

        return ret;
}
1996
/*
 * Parse a symmetric crypto xform chain into the driver-private session:
 * dispatch on cipher-only / auth-only / cipher+auth / auth+cipher /
 * AEAD, then attach one Rx frame queue per core under the device lock.
 *
 * Only cipher-then-auth for encrypt and auth-then-cipher for decrypt
 * chains are supported; other orderings are rejected with -EINVAL.
 *
 * Returns 0 on success, -EINVAL on invalid input or attach failure.
 */
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
                            struct rte_crypto_sym_xform *xform, void *sess)
{
        struct dpaa_sec_dev_private *internals = dev->data->dev_private;
        dpaa_sec_session *session = sess;
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        if (unlikely(sess == NULL)) {
                DPAA_SEC_ERR("invalid session struct");
                return -EINVAL;
        }
        memset(session, 0, sizeof(dpaa_sec_session));

        /* Default IV length = 0 */
        session->iv.length = 0;

        /* Cipher Only */
        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
                session->auth_alg = RTE_CRYPTO_AUTH_NULL;
                dpaa_sec_cipher_init(dev, xform, session);

        /* Authentication Only */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next == NULL) {
                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
                dpaa_sec_auth_init(dev, xform, session);

        /* Cipher then Authenticate */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                        dpaa_sec_cipher_init(dev, xform, session);
                        dpaa_sec_auth_init(dev, xform->next, session);
                } else {
                        DPAA_SEC_ERR("Not supported: Auth then Cipher");
                        return -EINVAL;
                }

        /* Authenticate then Cipher */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                        dpaa_sec_auth_init(dev, xform, session);
                        dpaa_sec_cipher_init(dev, xform->next, session);
                } else {
                        DPAA_SEC_ERR("Not supported: Auth then Cipher");
                        return -EINVAL;
                }

        /* AEAD operation for AES-GCM kind of Algorithms */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                   xform->next == NULL) {
                dpaa_sec_aead_init(dev, xform, session);

        } else {
                DPAA_SEC_ERR("Invalid crypto type");
                return -EINVAL;
        }
        session->ctx_pool = internals->ctx_pool;
        /* Reserve one Rx frame queue per core for this session */
        rte_spinlock_lock(&internals->lock);
        for (i = 0; i < MAX_DPAA_CORES; i++) {
                session->inq[i] = dpaa_sec_attach_rxq(internals);
                if (session->inq[i] == NULL) {
                        DPAA_SEC_ERR("unable to attach sec queue");
                        rte_spinlock_unlock(&internals->lock);
                        goto err1;
                }
        }
        rte_spinlock_unlock(&internals->lock);

        return 0;

err1:
        /* NOTE(review): Rx queues attached in earlier loop iterations
         * are not released here, so their inq_attach slots appear to
         * leak on partial failure — verify and consider detaching them.
         */
        rte_free(session->cipher_key.data);
        rte_free(session->auth_key.data);
        memset(session, 0, sizeof(dpaa_sec_session));

        return -EINVAL;
}
2079
2080 static int
2081 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2082                 struct rte_crypto_sym_xform *xform,
2083                 struct rte_cryptodev_sym_session *sess,
2084                 struct rte_mempool *mempool)
2085 {
2086         void *sess_private_data;
2087         int ret;
2088
2089         PMD_INIT_FUNC_TRACE();
2090
2091         if (rte_mempool_get(mempool, &sess_private_data)) {
2092                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2093                 return -ENOMEM;
2094         }
2095
2096         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2097         if (ret != 0) {
2098                 DPAA_SEC_ERR("failed to configure session parameters");
2099
2100                 /* Return session to mempool */
2101                 rte_mempool_put(mempool, sess_private_data);
2102                 return ret;
2103         }
2104
2105         set_sym_session_private_data(sess, dev->driver_id,
2106                         sess_private_data);
2107
2108
2109         return 0;
2110 }
2111
2112 /** Clear the memory of session so it doesn't leave key material behind */
2113 static void
2114 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2115                 struct rte_cryptodev_sym_session *sess)
2116 {
2117         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2118         uint8_t index = dev->driver_id, i;
2119         void *sess_priv = get_sym_session_private_data(sess, index);
2120
2121         PMD_INIT_FUNC_TRACE();
2122
2123         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2124
2125         if (sess_priv) {
2126                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2127
2128                 for (i = 0; i < MAX_DPAA_CORES; i++) {
2129                         if (s->inq[i])
2130                                 dpaa_sec_detach_rxq(qi, s->inq[i]);
2131                         s->inq[i] = NULL;
2132                         s->qp[i] = NULL;
2133                 }
2134                 rte_free(s->cipher_key.data);
2135                 rte_free(s->auth_key.data);
2136                 memset(s, 0, sizeof(dpaa_sec_session));
2137                 set_sym_session_private_data(sess, index, NULL);
2138                 rte_mempool_put(sess_mp, sess_priv);
2139         }
2140 }
2141
2142 static int
2143 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2144                            struct rte_security_session_conf *conf,
2145                            void *sess)
2146 {
2147         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2148         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2149         struct rte_crypto_auth_xform *auth_xform = NULL;
2150         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2151         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2152         uint32_t i;
2153
2154         PMD_INIT_FUNC_TRACE();
2155
2156         memset(session, 0, sizeof(dpaa_sec_session));
2157         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2158                 cipher_xform = &conf->crypto_xform->cipher;
2159                 if (conf->crypto_xform->next)
2160                         auth_xform = &conf->crypto_xform->next->auth;
2161         } else {
2162                 auth_xform = &conf->crypto_xform->auth;
2163                 if (conf->crypto_xform->next)
2164                         cipher_xform = &conf->crypto_xform->next->cipher;
2165         }
2166         session->proto_alg = conf->protocol;
2167
2168         if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2169                 session->cipher_key.data = rte_zmalloc(NULL,
2170                                                        cipher_xform->key.length,
2171                                                        RTE_CACHE_LINE_SIZE);
2172                 if (session->cipher_key.data == NULL &&
2173                                 cipher_xform->key.length > 0) {
2174                         DPAA_SEC_ERR("No Memory for cipher key");
2175                         return -ENOMEM;
2176                 }
2177                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2178                                 cipher_xform->key.length);
2179                 session->cipher_key.length = cipher_xform->key.length;
2180
2181                 switch (cipher_xform->algo) {
2182                 case RTE_CRYPTO_CIPHER_AES_CBC:
2183                 case RTE_CRYPTO_CIPHER_3DES_CBC:
2184                 case RTE_CRYPTO_CIPHER_AES_CTR:
2185                         break;
2186                 default:
2187                         DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2188                                 cipher_xform->algo);
2189                         goto out;
2190                 }
2191                 session->cipher_alg = cipher_xform->algo;
2192         } else {
2193                 session->cipher_key.data = NULL;
2194                 session->cipher_key.length = 0;
2195                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2196         }
2197
2198         if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2199                 session->auth_key.data = rte_zmalloc(NULL,
2200                                                 auth_xform->key.length,
2201                                                 RTE_CACHE_LINE_SIZE);
2202                 if (session->auth_key.data == NULL &&
2203                                 auth_xform->key.length > 0) {
2204                         DPAA_SEC_ERR("No Memory for auth key");
2205                         rte_free(session->cipher_key.data);
2206                         return -ENOMEM;
2207                 }
2208                 memcpy(session->auth_key.data, auth_xform->key.data,
2209                                 auth_xform->key.length);
2210                 session->auth_key.length = auth_xform->key.length;
2211
2212                 switch (auth_xform->algo) {
2213                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2214                 case RTE_CRYPTO_AUTH_MD5_HMAC:
2215                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2216                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2217                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2218                 case RTE_CRYPTO_AUTH_AES_CMAC:
2219                         break;
2220                 default:
2221                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2222                                 auth_xform->algo);
2223                         goto out;
2224                 }
2225                 session->auth_alg = auth_xform->algo;
2226         } else {
2227                 session->auth_key.data = NULL;
2228                 session->auth_key.length = 0;
2229                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2230         }
2231
2232         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2233                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2234                                 sizeof(session->ip4_hdr));
2235                 session->ip4_hdr.ip_v = IPVERSION;
2236                 session->ip4_hdr.ip_hl = 5;
2237                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2238                                                 sizeof(session->ip4_hdr));
2239                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2240                 session->ip4_hdr.ip_id = 0;
2241                 session->ip4_hdr.ip_off = 0;
2242                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2243                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2244                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2245                                 : IPPROTO_AH;
2246                 session->ip4_hdr.ip_sum = 0;
2247                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2248                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2249                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2250                                                 (void *)&session->ip4_hdr,
2251                                                 sizeof(struct ip));
2252
2253                 session->encap_pdb.options =
2254                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2255                         PDBOPTS_ESP_OIHI_PDB_INL |
2256                         PDBOPTS_ESP_IVSRC |
2257                         PDBHMO_ESP_ENCAP_DTTL |
2258                         PDBHMO_ESP_SNR;
2259                 session->encap_pdb.spi = ipsec_xform->spi;
2260                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2261
2262                 session->dir = DIR_ENC;
2263         } else if (ipsec_xform->direction ==
2264                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2265                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2266                 session->decap_pdb.options = sizeof(struct ip) << 16;
2267                 session->dir = DIR_DEC;
2268         } else
2269                 goto out;
2270         session->ctx_pool = internals->ctx_pool;
2271         rte_spinlock_lock(&internals->lock);
2272         for (i = 0; i < MAX_DPAA_CORES; i++) {
2273                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2274                 if (session->inq[i] == NULL) {
2275                         DPAA_SEC_ERR("unable to attach sec queue");
2276                         rte_spinlock_unlock(&internals->lock);
2277                         goto out;
2278                 }
2279         }
2280         rte_spinlock_unlock(&internals->lock);
2281
2282         return 0;
2283 out:
2284         rte_free(session->auth_key.data);
2285         rte_free(session->cipher_key.data);
2286         memset(session, 0, sizeof(dpaa_sec_session));
2287         return -1;
2288 }
2289
/*
 * Configure a driver-private session for PDCP protocol offload.
 *
 * Extracts cipher (and optionally auth) xforms from the chain, copies
 * keys, records the PDCP parameters (domain, bearer, direction, SN
 * size, HFN settings) and attaches one Rx frame queue per core.
 * Authentication is only configured for control-plane mode, which
 * requires a 5-bit sequence number.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on bad input or allocation
 * failure, -1 on other errors (session is wiped before returning).
 */
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
                          struct rte_security_session_conf *conf,
                          void *sess)
{
        struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
        struct rte_crypto_sym_xform *xform = conf->crypto_xform;
        struct rte_crypto_auth_xform *auth_xform = NULL;
        struct rte_crypto_cipher_xform *cipher_xform = NULL;
        dpaa_sec_session *session = (dpaa_sec_session *)sess;
        struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        memset(session, 0, sizeof(dpaa_sec_session));

        /* find xfrm types */
        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                cipher_xform = &xform->cipher;
                if (xform->next != NULL)
                        auth_xform = &xform->next->auth;
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                auth_xform = &xform->auth;
                if (xform->next != NULL)
                        cipher_xform = &xform->next->cipher;
        } else {
                DPAA_SEC_ERR("Invalid crypto type");
                return -EINVAL;
        }

        session->proto_alg = conf->protocol;
        if (cipher_xform) {
                session->cipher_key.data = rte_zmalloc(NULL,
                                               cipher_xform->key.length,
                                               RTE_CACHE_LINE_SIZE);
                if (session->cipher_key.data == NULL &&
                                cipher_xform->key.length > 0) {
                        DPAA_SEC_ERR("No Memory for cipher key");
                        return -ENOMEM;
                }
                session->cipher_key.length = cipher_xform->key.length;
                memcpy(session->cipher_key.data, cipher_xform->key.data,
                        cipher_xform->key.length);
                session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                                        DIR_ENC : DIR_DEC;
                session->cipher_alg = cipher_xform->algo;
        } else {
                /* NULL cipher: still treated as the encrypt direction */
                session->cipher_key.data = NULL;
                session->cipher_key.length = 0;
                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
                session->dir = DIR_ENC;
        }

        /* Auth is only applicable for control mode operation. */
        if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
                        DPAA_SEC_ERR(
                                "PDCP Seq Num size should be 5 bits for cmode");
                        goto out;
                }
                if (auth_xform) {
                        session->auth_key.data = rte_zmalloc(NULL,
                                                        auth_xform->key.length,
                                                        RTE_CACHE_LINE_SIZE);
                        if (session->auth_key.data == NULL &&
                                        auth_xform->key.length > 0) {
                                DPAA_SEC_ERR("No Memory for auth key");
                                rte_free(session->cipher_key.data);
                                return -ENOMEM;
                        }
                        session->auth_key.length = auth_xform->key.length;
                        memcpy(session->auth_key.data, auth_xform->key.data,
                                        auth_xform->key.length);
                        session->auth_alg = auth_xform->algo;
                } else {
                        session->auth_key.data = NULL;
                        session->auth_key.length = 0;
                        session->auth_alg = RTE_CRYPTO_AUTH_NULL;
                }
        }
        /* Record PDCP protocol parameters used to build the descriptor */
        session->pdcp.domain = pdcp_xform->domain;
        session->pdcp.bearer = pdcp_xform->bearer;
        session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
        session->pdcp.sn_size = pdcp_xform->sn_size;
#ifdef ENABLE_HFN_OVERRIDE
        session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
#endif
        session->pdcp.hfn = pdcp_xform->hfn;
        session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;

        session->ctx_pool = dev_priv->ctx_pool;
        /* Reserve one Rx frame queue per core for this session */
        rte_spinlock_lock(&dev_priv->lock);
        for (i = 0; i < MAX_DPAA_CORES; i++) {
                session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
                if (session->inq[i] == NULL) {
                        DPAA_SEC_ERR("unable to attach sec queue");
                        rte_spinlock_unlock(&dev_priv->lock);
                        goto out;
                }
        }
        rte_spinlock_unlock(&dev_priv->lock);
        return 0;
out:
        rte_free(session->auth_key.data);
        rte_free(session->cipher_key.data);
        memset(session, 0, sizeof(dpaa_sec_session));
        return -1;
}
2399
2400 static int
2401 dpaa_sec_security_session_create(void *dev,
2402                                  struct rte_security_session_conf *conf,
2403                                  struct rte_security_session *sess,
2404                                  struct rte_mempool *mempool)
2405 {
2406         void *sess_private_data;
2407         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2408         int ret;
2409
2410         if (rte_mempool_get(mempool, &sess_private_data)) {
2411                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2412                 return -ENOMEM;
2413         }
2414
2415         switch (conf->protocol) {
2416         case RTE_SECURITY_PROTOCOL_IPSEC:
2417                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2418                                 sess_private_data);
2419                 break;
2420         case RTE_SECURITY_PROTOCOL_PDCP:
2421                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
2422                                 sess_private_data);
2423                 break;
2424         case RTE_SECURITY_PROTOCOL_MACSEC:
2425                 return -ENOTSUP;
2426         default:
2427                 return -EINVAL;
2428         }
2429         if (ret != 0) {
2430                 DPAA_SEC_ERR("failed to configure session parameters");
2431                 /* Return session to mempool */
2432                 rte_mempool_put(mempool, sess_private_data);
2433                 return ret;
2434         }
2435
2436         set_sec_session_private_data(sess, sess_private_data);
2437
2438         return ret;
2439 }
2440
2441 /** Clear the memory of session so it doesn't leave key material behind */
2442 static int
2443 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2444                 struct rte_security_session *sess)
2445 {
2446         PMD_INIT_FUNC_TRACE();
2447         void *sess_priv = get_sec_session_private_data(sess);
2448
2449         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2450
2451         if (sess_priv) {
2452                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2453
2454                 rte_free(s->cipher_key.data);
2455                 rte_free(s->auth_key.data);
2456                 memset(sess, 0, sizeof(dpaa_sec_session));
2457                 set_sec_session_private_data(sess, NULL);
2458                 rte_mempool_put(sess_mp, sess_priv);
2459         }
2460         return 0;
2461 }
2462
2463
2464 static int
2465 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2466                        struct rte_cryptodev_config *config __rte_unused)
2467 {
2468
2469         char str[20];
2470         struct dpaa_sec_dev_private *internals;
2471
2472         PMD_INIT_FUNC_TRACE();
2473
2474         internals = dev->data->dev_private;
2475         snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2476         if (!internals->ctx_pool) {
2477                 internals->ctx_pool = rte_mempool_create((const char *)str,
2478                                                         CTX_POOL_NUM_BUFS,
2479                                                         CTX_POOL_BUF_SIZE,
2480                                                         CTX_POOL_CACHE_SIZE, 0,
2481                                                         NULL, NULL, NULL, NULL,
2482                                                         SOCKET_ID_ANY, 0);
2483                 if (!internals->ctx_pool) {
2484                         DPAA_SEC_ERR("%s create failed\n", str);
2485                         return -ENOMEM;
2486                 }
2487         } else
2488                 DPAA_SEC_INFO("mempool already created for dev_id : %d",
2489                                 dev->data->dev_id);
2490
2491         return 0;
2492 }
2493
/* dev_start op: nothing to do for this PMD (queues are armed at
 * session-attach time), so just trace and report success.
 */
static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}
2500
/* dev_stop op: intentionally a no-op for this PMD. */
static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
2506
2507 static int
2508 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2509 {
2510         struct dpaa_sec_dev_private *internals;
2511
2512         PMD_INIT_FUNC_TRACE();
2513
2514         if (dev == NULL)
2515                 return -ENOMEM;
2516
2517         internals = dev->data->dev_private;
2518         rte_mempool_free(internals->ctx_pool);
2519         internals->ctx_pool = NULL;
2520
2521         return 0;
2522 }
2523
2524 static void
2525 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2526                        struct rte_cryptodev_info *info)
2527 {
2528         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2529
2530         PMD_INIT_FUNC_TRACE();
2531         if (info != NULL) {
2532                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2533                 info->feature_flags = dev->feature_flags;
2534                 info->capabilities = dpaa_sec_capabilities;
2535                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2536                 info->driver_id = cryptodev_driver_id;
2537         }
2538 }
2539
/* Cryptodev PMD operations table wired into every dpaa_sec device. */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa_sec_dev_configure,
	.dev_start            = dpaa_sec_dev_start,
	.dev_stop             = dpaa_sec_dev_stop,
	.dev_close            = dpaa_sec_dev_close,
	.dev_infos_get        = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.queue_pair_count     = dpaa_sec_queue_pair_count,
	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
	.sym_session_configure    = dpaa_sec_sym_session_configure,
	.sym_session_clear        = dpaa_sec_sym_session_clear
};
2553
/* rte_security capabilities_get op: the capability table is static, so
 * just hand back the shared array.
 */
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}
2559
/* rte_security operations table; update/stats/metadata are not supported
 * by this PMD, hence the NULL entries.
 */
static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
2568
2569 static int
2570 dpaa_sec_uninit(struct rte_cryptodev *dev)
2571 {
2572         struct dpaa_sec_dev_private *internals;
2573
2574         if (dev == NULL)
2575                 return -ENODEV;
2576
2577         internals = dev->data->dev_private;
2578         rte_free(dev->security_ctx);
2579
2580         /* In case close has been called, internals->ctx_pool would be NULL */
2581         rte_mempool_free(internals->ctx_pool);
2582         rte_free(internals);
2583
2584         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2585                       dev->data->name, rte_socket_id());
2586
2587         return 0;
2588 }
2589
/*
 * One-time device initialization: install the ops table and burst
 * functions, advertise feature flags, then (primary process only) create
 * the rte_security context and the qman TX/RX frame queues.
 * Returns 0 on success, -ENOMEM/-EFAULT on failure; on failure the error
 * path delegates cleanup to dpaa_sec_uninit().
 */
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	/* Lock protects the shared rx-queue/session attach state. */
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair  %d", i);
			goto init_error;
		}
	}

	/* RX FQs feed frames to the SEC block (DC portal); FQIDs are
	 * allocated dynamically by qman.
	 */
	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	/* dpaa_sec_uninit frees security_ctx and dev_private. */
	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
2671
/*
 * DPAA bus probe callback: allocate a cryptodev for the SEC device,
 * discover the SEC era from the device tree if not already set, and run
 * the PMD-specific init.  Returns 0 on success, negative errno otherwise.
 */
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
			dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	/* Private data lives in the primary process only; secondaries share
	 * it through the cryptodev data memzone.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		/* NOTE(review): rte_panic on OOM aborts the whole process;
		 * a graceful -ENOMEM return would be friendlier — confirm
		 * against the project's error-handling policy.
		 */
		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		/* Read the "fsl,sec-era" property from the CAAM node so the
		 * RTA descriptor library emits era-appropriate commands.
		 */
		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	/* NOTE(review): dpaa_sec_dev_init's error path already calls
	 * dpaa_sec_uninit(), which frees dev_private — this rte_free may
	 * see a stale pointer (double free) unless uninit clears it; verify.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}
2735
2736 static int
2737 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2738 {
2739         struct rte_cryptodev *cryptodev;
2740         int ret;
2741
2742         cryptodev = dpaa_dev->crypto_dev;
2743         if (cryptodev == NULL)
2744                 return -ENODEV;
2745
2746         ret = dpaa_sec_uninit(cryptodev);
2747         if (ret)
2748                 return ret;
2749
2750         return rte_cryptodev_pmd_destroy(cryptodev);
2751 }
2752
/* DPAA bus driver descriptor binding probe/remove to SEC devices. */
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};
2761
static struct cryptodev_driver dpaa_sec_crypto_drv;

/* Register with the DPAA bus and the cryptodev framework; the latter
 * assigns cryptodev_driver_id used to tag sessions for this PMD.
 */
RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
2767
/* Constructor: register the PMD log type and default it to NOTICE level. */
RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}