drivers/crypto/dpaa_sec/dpaa_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

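/*
 * Per-lcore staging area for completed crypto ops: filled by the DQRR
 * callback dqrr_out_fq_cb_rx() (at most DPAA_SEC_BURST ops at a time)
 * and drained by the caller that polled the portal.
 */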
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
        struct dpaa_sec_op_ctx *ctx;
        int i, retval;

        retval = rte_mempool_get(
                        ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
                        (void **)(&ctx));
        if (!ctx || retval) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes (four entries), hence it
         * is called once per four SG entries. dpaa_sec_alloc_ctx() runs
         * for every packet, and memset() is costlier than dcbz_64().
         */
        for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
                dcbz_64(&ctx->job.sg[i]);

        ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
        ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

        return ctx;
}

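/*
 * Virtual <-> IOVA translation helpers. dpaa_mem_vtop() resolves a virtual
 * address through the memseg it belongs to and primes the dpaax IOVA table
 * so that the reverse lookup in dpaa_mem_ptov() normally stays on the fast
 * (table) path, falling back to rte_mem_iova2virt() otherwise.
 */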
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
        const struct rte_memseg *ms;

        ms = rte_mem_virt2memseg(vaddr, NULL);
        if (ms) {
                dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
                return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
        }
        return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
        void *va;

        va = (void *)dpaax_iova_table_get_va(paddr);
        if (likely(va))
                return va;

        return rte_mem_iova2virt(paddr);
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
                        fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as destination so that
 * all the packets in this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = qm_channel_caam;
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                DPAA_SEC_ERR("qman_init_fq failed %d", ret);

        return ret;
}

/* Jobs are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* sg is embedded in an op ctx:
         * sg[0] is for output,
         * sg[1] is for input.
         */
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;
                struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
                                ctx->op->sym->m_src : ctx->op->sym->m_dst;

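                /* For protocol (lookaside) sessions the frame length may
                 * change: propagate the SEC output length from the SG
                 * table to the mbuf chain and trim the tail segment.
                 */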
                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                mbuf->pkt_len = len;
                while (mbuf->next != NULL) {
                        len -= mbuf->data_len;
                        mbuf = mbuf->next;
                }
                mbuf->data_len = len;
        }
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}

/* CAAM results are put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("unable to init caam source fq!");
                return ret;
        }

        return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
                (ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
                (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC) &&
                (ses->aead_alg == 0));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
        return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_proto_pdcp(dpaa_sec_session *ses)
{
        return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

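/*
 * Map the session algorithms onto CAAM descriptor selectors. For IPsec
 * protocol offload the OP_PCL_IPSEC_* protocol values are used; for plain
 * crypto the raw OP_ALG_ALGSEL_* selectors plus an AAI mode are used.
 */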
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
        switch (ses->auth_alg) {
        case RTE_CRYPTO_AUTH_NULL:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_NULL : 0;
                ses->digest_length = 0;
                break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        default:
                DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
        }
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
        switch (ses->cipher_alg) {
        case RTE_CRYPTO_CIPHER_NULL:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_NULL : 0;
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CTR;
                break;
        default:
                DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
        }
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
        switch (ses->aead_alg) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                alginfo->algtype = OP_ALG_ALGSEL_AES;
                alginfo->algmode = OP_ALG_AAI_GCM;
                break;
        default:
                DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
        }
}

static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
        struct alginfo authdata = {0}, cipherdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        struct alginfo *p_authdata = NULL;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        switch (ses->cipher_alg) {
        case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
                break;
        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
                break;
        case RTE_CRYPTO_CIPHER_NULL:
                cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
                break;
        default:
                DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
                              ses->cipher_alg);
                return -1;
        }

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;

        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;

        if (ses->auth_alg) {
                switch (ses->auth_alg) {
                case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
                        authdata.algtype = PDCP_AUTH_TYPE_SNOW;
                        break;
                case RTE_CRYPTO_AUTH_ZUC_EIA3:
                        authdata.algtype = PDCP_AUTH_TYPE_ZUC;
                        break;
                case RTE_CRYPTO_AUTH_AES_CMAC:
                        authdata.algtype = PDCP_AUTH_TYPE_AES;
                        break;
                case RTE_CRYPTO_AUTH_NULL:
                        authdata.algtype = PDCP_AUTH_TYPE_NULL;
                        break;
                default:
                        DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                                      ses->auth_alg);
                        return -1;
                }

                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;

                p_authdata = &authdata;

                cdb->sh_desc[1] = authdata.keylen;
        }

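        /* rta_inline_query() takes the key lengths staged in sh_desc[0..1]
         * and returns in sh_desc[2] a bitmask of which keys fit inline in
         * the shared descriptor (bit 0: cipher key, bit 1: auth key); keys
         * that do not fit are passed by reference below.
         */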
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               MIN_JOB_DESC_SIZE,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);
        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }

        if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
                cipherdata.key =
                        (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
                authdata.key =
                        (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;

        if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
        } else {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, p_authdata, 0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, p_authdata, 0);
        }

        return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
        struct alginfo cipherdata = {0}, authdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        caam_cipher_alg(ses, &cipherdata);
        if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                DPAA_SEC_ERR("not supported cipher alg");
                return -ENOTSUP;
        }

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;

        caam_auth_alg(ses, &authdata);
        if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                DPAA_SEC_ERR("not supported auth alg");
                return -ENOTSUP;
        }

        authdata.key = (size_t)ses->auth_key.data;
        authdata.keylen = ses->auth_key.length;
        authdata.key_enc_flags = 0;
        authdata.key_type = RTA_DATA_IMM;

        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = authdata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               MIN_JOB_DESC_SIZE,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);

        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }
        if (cdb->sh_desc[2] & 1)
                cipherdata.key_type = RTA_DATA_IMM;
        else {
                cipherdata.key = (size_t)dpaa_mem_vtop(
                                        (void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (cdb->sh_desc[2] & (1<<1))
                authdata.key_type = RTA_DATA_IMM;
        else {
                authdata.key = (size_t)dpaa_mem_vtop(
                                        (void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        if (ses->dir == DIR_ENC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->encap_pdb,
                                (uint8_t *)&ses->ip4_hdr,
                                &cipherdata, &authdata);
        } else if (ses->dir == DIR_DEC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->decap_pdb,
                                &cipherdata, &authdata);
        }
        return shared_desc_len;
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        int32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        if (is_proto_ipsec(ses)) {
                shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
        } else if (is_proto_pdcp(ses)) {
                shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
        } else if (is_cipher_only(ses)) {
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                switch (ses->cipher_alg) {
                case RTE_CRYPTO_CIPHER_NULL:
                        alginfo_c.algtype = 0;
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        NULL,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_AES_CBC:
                        alginfo_c.algtype = OP_ALG_ALGSEL_AES;
                        alginfo_c.algmode = OP_ALG_AAI_CBC;
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        NULL,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_3DES_CBC:
                        alginfo_c.algtype = OP_ALG_ALGSEL_3DES;
                        alginfo_c.algmode = OP_ALG_AAI_CBC;
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        NULL,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_AES_CTR:
                        alginfo_c.algtype = OP_ALG_ALGSEL_AES;
                        alginfo_c.algmode = OP_ALG_AAI_CTR;
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        NULL,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_3DES_CTR:
                        alginfo_c.algtype = OP_ALG_ALGSEL_3DES;
                        alginfo_c.algmode = OP_ALG_AAI_CTR;
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        NULL,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                        alginfo_c.algtype = OP_ALG_ALGSEL_SNOW_F8;
                        shared_desc_len = cnstr_shdsc_snow_f8(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                        alginfo_c.algtype = OP_ALG_ALGSEL_ZUCE;
                        shared_desc_len = cnstr_shdsc_zuce(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported cipher alg %d",
                                     ses->cipher_alg);
                        return -ENOTSUP;
                }
        } else if (is_auth_only(ses)) {
                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                switch (ses->auth_alg) {
                case RTE_CRYPTO_AUTH_NULL:
                        alginfo_a.algtype = 0;
                        ses->digest_length = 0;
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_MD5_HMAC:
                        alginfo_a.algtype = OP_ALG_ALGSEL_MD5;
                        alginfo_a.algmode = OP_ALG_AAI_HMAC;
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                        alginfo_a.algtype = OP_ALG_ALGSEL_SHA1;
                        alginfo_a.algmode = OP_ALG_AAI_HMAC;
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SHA224_HMAC:
                        alginfo_a.algtype = OP_ALG_ALGSEL_SHA224;
                        alginfo_a.algmode = OP_ALG_AAI_HMAC;
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                        alginfo_a.algtype = OP_ALG_ALGSEL_SHA256;
                        alginfo_a.algmode = OP_ALG_AAI_HMAC;
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                        alginfo_a.algtype = OP_ALG_ALGSEL_SHA384;
                        alginfo_a.algmode = OP_ALG_AAI_HMAC;
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                        alginfo_a.algtype = OP_ALG_ALGSEL_SHA512;
                        alginfo_a.algmode = OP_ALG_AAI_HMAC;
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
                        alginfo_a.algtype = OP_ALG_ALGSEL_SNOW_F9;
                        alginfo_a.algmode = OP_ALG_AAI_F9;
                        ses->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
                        shared_desc_len = cnstr_shdsc_snow_f9(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_ZUC_EIA3:
                        alginfo_a.algtype = OP_ALG_ALGSEL_ZUCA;
                        alginfo_a.algmode = OP_ALG_AAI_F9;
                        ses->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
                        shared_desc_len = cnstr_shdsc_zuca(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
                }
        } else if (is_aead(ses)) {
                caam_aead_alg(ses, &alginfo);
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported aead alg");
                        return -ENOTSUP;
                }
                alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
        } else {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported cipher alg");
                        return -ENOTSUP;
                }

                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported auth alg");
                        return -ENOTSUP;
                }

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       MIN_JOB_DESC_SIZE,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (size_t)dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1<<1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (size_t)dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
                /* auth_only_len is set to 0 here; it is overwritten in the
                 * FD for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
                                ses->iv.length,
                                ses->digest_length, ses->dir);
        }

        if (shared_desc_len < 0) {
                DPAA_SEC_ERR("error in preparing command block");
                return shared_desc_len;
        }

        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

/* qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers we set QM_VDQCR_EXACT so
         * that exactly the requested number of frames is returned. Without
         * QM_VDQCR_EXACT the portal may return up to two more frames than
         * requested, so in that case we ask for two fewer.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* sg is embedded in an op ctx:
                 * sg[0] is for output,
                 * sg[1] is for input.
                 */
                job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;
                        struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
                                                op->sym->m_src : op->sym->m_dst;

                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        mbuf->pkt_len = len;
                        while (mbuf->next != NULL) {
                                len -= mbuf->data_len;
                                mbuf = mbuf->next;
                        }
                        mbuf->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* report op status to sym->op and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}
973
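/*
 * Per-op job builders. Each constructs a SEC compound frame:
 * cf->sg[0] describes the output, cf->sg[1] the input, and the remaining
 * cf->sg[] entries hold the scatter-gather lists that the extension
 * entries point at.
 */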
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

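        /* SNOW 3G (UIA2) and ZUC (EIA3) express auth length/offset in
         * bits; enforce byte alignment and convert to bytes for the SG
         * entries below. The cipher builders follow the same convention.
         */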
        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->offset = data_offset;

        if (data_len <= (mbuf->data_len - data_offset)) {
                sg->length = data_len;
        } else {
                sg->length = mbuf->data_len - data_offset;

                /* remaining i/p segs */
                while ((data_len = data_len - sg->length) &&
                       (mbuf = mbuf->next)) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                        if (data_len > mbuf->data_len)
                                sg->length = mbuf->data_len;
                        else
                                sg->length = data_len;
                }
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                start_addr = dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *in_sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
        sg = &cf->sg[2];

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->offset = data_offset;
        sg->length = data_len;

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                /* save the pre-computed digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                /* let the hardware verify the digest */
                start_addr = dpaa_mem_vtop(old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }
        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = data_len;
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + data_offset);
        sg->length = data_len + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = data_len + ses->iv.length;
        qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + data_offset);
        sg->length = data_len;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

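/*
 * AEAD (AES-GCM) scatter-gather path. The input compound frame is
 * IV + optional AAD (auth_only_len) + payload (+ digest on decrypt);
 * the output frame is the payload (+ digest on encrypt).
 */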
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (ses->auth_only_len)
                req_segs++;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->aead.data.length + ses->digest_length;
        else
                out_sg->length = sym->aead.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->aead.data.length
                                                        + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + sym->aead.data.length
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1480                 sg->length = ses->auth_only_len;
1481                 cpu_to_hw_sg(sg);
1482         }
1483
1484         /* 3rd seg */
1485         sg++;
1486         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1487         sg->length = mbuf->data_len - sym->aead.data.offset;
1488         sg->offset = sym->aead.data.offset;
1489
1490         /* Successive segs */
1491         mbuf = mbuf->next;
1492         while (mbuf) {
1493                 cpu_to_hw_sg(sg);
1494                 sg++;
1495                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1496                 sg->length = mbuf->data_len;
1497                 mbuf = mbuf->next;
1498         }
1499
1500         if (is_decode(ses)) {
1501                 cpu_to_hw_sg(sg);
1502                 sg++;
1503                 memcpy(ctx->digest, sym->aead.digest.data,
1504                         ses->digest_length);
1505                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1506                 sg->length = ses->digest_length;
1507         }
1508         sg->final = 1;
1509         cpu_to_hw_sg(sg);
1510
1511         return cf;
1512 }
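
/*
 * For the AEAD builders the input frame is IV || AAD || payload on
 * encode and IV || AAD || payload || ICV on decode; on decode the
 * received digest is first copied into ctx->digest so SEC reads the
 * reference ICV from driver-owned memory during verification.
 */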
1513
1514 static inline struct dpaa_sec_job *
1515 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1516 {
1517         struct rte_crypto_sym_op *sym = op->sym;
1518         struct dpaa_sec_job *cf;
1519         struct dpaa_sec_op_ctx *ctx;
1520         struct qm_sg_entry *sg;
1521         uint32_t length = 0;
1522         rte_iova_t src_start_addr, dst_start_addr;
1523         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1524                         ses->iv.offset);
1525
1526         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1527
1528         if (sym->m_dst)
1529                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1530         else
1531                 dst_start_addr = src_start_addr;
1532
1533         ctx = dpaa_sec_alloc_ctx(ses, 7);
1534         if (!ctx)
1535                 return NULL;
1536
1537         cf = &ctx->job;
1538         ctx->op = op;
1539
1540         /* input */
1541         rte_prefetch0(cf->sg);
1542         sg = &cf->sg[2];
1543         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1544         if (is_encode(ses)) {
1545                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1546                 sg->length = ses->iv.length;
1547                 length += sg->length;
1548                 cpu_to_hw_sg(sg);
1549
1550                 sg++;
1551                 if (ses->auth_only_len) {
1552                         qm_sg_entry_set64(sg,
1553                                           dpaa_mem_vtop(sym->aead.aad.data));
1554                         sg->length = ses->auth_only_len;
1555                         length += sg->length;
1556                         cpu_to_hw_sg(sg);
1557                         sg++;
1558                 }
1559                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1560                 sg->length = sym->aead.data.length;
1561                 length += sg->length;
1562                 sg->final = 1;
1563                 cpu_to_hw_sg(sg);
1564         } else {
1565                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1566                 sg->length = ses->iv.length;
1567                 length += sg->length;
1568                 cpu_to_hw_sg(sg);
1569
1570                 sg++;
1571                 if (ses->auth_only_len) {
1572                         qm_sg_entry_set64(sg,
1573                                           dpaa_mem_vtop(sym->aead.aad.data));
1574                         sg->length = ses->auth_only_len;
1575                         length += sg->length;
1576                         cpu_to_hw_sg(sg);
1577                         sg++;
1578                 }
1579                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1580                 sg->length = sym->aead.data.length;
1581                 length += sg->length;
1582                 cpu_to_hw_sg(sg);
1583
1584                 memcpy(ctx->digest, sym->aead.digest.data,
1585                        ses->digest_length);
1586                 sg++;
1587
1588                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1589                 sg->length = ses->digest_length;
1590                 length += sg->length;
1591                 sg->final = 1;
1592                 cpu_to_hw_sg(sg);
1593         }
1594         /* input compound frame */
1595         cf->sg[1].length = length;
1596         cf->sg[1].extension = 1;
1597         cf->sg[1].final = 1;
1598         cpu_to_hw_sg(&cf->sg[1]);
1599
1600         /* output */
1601         sg++;
1602         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1603         qm_sg_entry_set64(sg,
1604                 dst_start_addr + sym->aead.data.offset);
1605         sg->length = sym->aead.data.length;
1606         length = sg->length;
1607         if (is_encode(ses)) {
1608                 cpu_to_hw_sg(sg);
1609                 /* set auth output */
1610                 sg++;
1611                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1612                 sg->length = ses->digest_length;
1613                 length += sg->length;
1614         }
1615         sg->final = 1;
1616         cpu_to_hw_sg(sg);
1617
1618         /* output compound frame */
1619         cf->sg[0].length = length;
1620         cf->sg[0].extension = 1;
1621         cpu_to_hw_sg(&cf->sg[0]);
1622
1623         return cf;
1624 }
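
/*
 * The AAD length is not encoded in the shared descriptor for GCM:
 * ses->auth_only_len travels per packet in fd->cmd (the DPOVRD
 * register) from dpaa_sec_enqueue_burst() below, which is why both
 * GCM builders only add the AAD bytes themselves to the input frame.
 */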
1625
1626 static inline struct dpaa_sec_job *
1627 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1628 {
1629         struct rte_crypto_sym_op *sym = op->sym;
1630         struct dpaa_sec_job *cf;
1631         struct dpaa_sec_op_ctx *ctx;
1632         struct qm_sg_entry *sg, *out_sg, *in_sg;
1633         struct rte_mbuf *mbuf;
1634         uint8_t req_segs;
1635         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1636                         ses->iv.offset);
1637
1638         if (sym->m_dst) {
1639                 mbuf = sym->m_dst;
1640                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1641         } else {
1642                 mbuf = sym->m_src;
1643                 req_segs = mbuf->nb_segs * 2 + 4;
1644         }
1645
1646         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1647                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1648                                 MAX_SG_ENTRIES);
1649                 return NULL;
1650         }
1651
1652         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1653         if (!ctx)
1654                 return NULL;
1655
1656         cf = &ctx->job;
1657         ctx->op = op;
1658
1659         rte_prefetch0(cf->sg);
1660
1661         /* output */
1662         out_sg = &cf->sg[0];
1663         out_sg->extension = 1;
1664         if (is_encode(ses))
1665                 out_sg->length = sym->auth.data.length + ses->digest_length;
1666         else
1667                 out_sg->length = sym->auth.data.length;
1668
1669         /* output sg entries */
1670         sg = &cf->sg[2];
1671         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1672         cpu_to_hw_sg(out_sg);
1673
1674         /* 1st seg */
1675         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1676         sg->length = mbuf->data_len - sym->auth.data.offset;
1677         sg->offset = sym->auth.data.offset;
1678
1679         /* Successive segs */
1680         mbuf = mbuf->next;
1681         while (mbuf) {
1682                 cpu_to_hw_sg(sg);
1683                 sg++;
1684                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1685                 sg->length = mbuf->data_len;
1686                 mbuf = mbuf->next;
1687         }
1688         sg->length -= ses->digest_length;
1689
1690         if (is_encode(ses)) {
1691                 cpu_to_hw_sg(sg);
1692                 /* set auth output */
1693                 sg++;
1694                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1695                 sg->length = ses->digest_length;
1696         }
1697         sg->final = 1;
1698         cpu_to_hw_sg(sg);
1699
1700         /* input */
1701         mbuf = sym->m_src;
1702         in_sg = &cf->sg[1];
1703         in_sg->extension = 1;
1704         in_sg->final = 1;
1705         if (is_encode(ses))
1706                 in_sg->length = ses->iv.length + sym->auth.data.length;
1707         else
1708                 in_sg->length = ses->iv.length + sym->auth.data.length
1709                                                 + ses->digest_length;
1710
1711         /* input sg entries */
1712         sg++;
1713         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1714         cpu_to_hw_sg(in_sg);
1715
1716         /* 1st seg IV */
1717         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1718         sg->length = ses->iv.length;
1719         cpu_to_hw_sg(sg);
1720
1721         /* 2nd seg */
1722         sg++;
1723         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1724         sg->length = mbuf->data_len - sym->auth.data.offset;
1725         sg->offset = sym->auth.data.offset;
1726
1727         /* Successive segs */
1728         mbuf = mbuf->next;
1729         while (mbuf) {
1730                 cpu_to_hw_sg(sg);
1731                 sg++;
1732                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1733                 sg->length = mbuf->data_len;
1734                 mbuf = mbuf->next;
1735         }
1736
1737         sg->length -= ses->digest_length;
1738         if (is_decode(ses)) {
1739                 cpu_to_hw_sg(sg);
1740                 sg++;
1741                 memcpy(ctx->digest, sym->auth.digest.data,
1742                         ses->digest_length);
1743                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1744                 sg->length = ses->digest_length;
1745         }
1746         sg->final = 1;
1747         cpu_to_hw_sg(sg);
1748
1749         return cf;
1750 }
1751
1752 static inline struct dpaa_sec_job *
1753 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1754 {
1755         struct rte_crypto_sym_op *sym = op->sym;
1756         struct dpaa_sec_job *cf;
1757         struct dpaa_sec_op_ctx *ctx;
1758         struct qm_sg_entry *sg;
1759         rte_iova_t src_start_addr, dst_start_addr;
1760         uint32_t length = 0;
1761         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1762                         ses->iv.offset);
1763
1764         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1765         if (sym->m_dst)
1766                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1767         else
1768                 dst_start_addr = src_start_addr;
1769
1770         ctx = dpaa_sec_alloc_ctx(ses, 7);
1771         if (!ctx)
1772                 return NULL;
1773
1774         cf = &ctx->job;
1775         ctx->op = op;
1776
1777         /* input */
1778         rte_prefetch0(cf->sg);
1779         sg = &cf->sg[2];
1780         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1781         if (is_encode(ses)) {
1782                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1783                 sg->length = ses->iv.length;
1784                 length += sg->length;
1785                 cpu_to_hw_sg(sg);
1786
1787                 sg++;
1788                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1789                 sg->length = sym->auth.data.length;
1790                 length += sg->length;
1791                 sg->final = 1;
1792                 cpu_to_hw_sg(sg);
1793         } else {
1794                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1795                 sg->length = ses->iv.length;
1796                 length += sg->length;
1797                 cpu_to_hw_sg(sg);
1798
1799                 sg++;
1800
1801                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1802                 sg->length = sym->auth.data.length;
1803                 length += sg->length;
1804                 cpu_to_hw_sg(sg);
1805
1806                 memcpy(ctx->digest, sym->auth.digest.data,
1807                        ses->digest_length);
1808                 sg++;
1809
1810                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1811                 sg->length = ses->digest_length;
1812                 length += sg->length;
1813                 sg->final = 1;
1814                 cpu_to_hw_sg(sg);
1815         }
1816         /* input compound frame */
1817         cf->sg[1].length = length;
1818         cf->sg[1].extension = 1;
1819         cf->sg[1].final = 1;
1820         cpu_to_hw_sg(&cf->sg[1]);
1821
1822         /* output */
1823         sg++;
1824         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1825         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1826         sg->length = sym->cipher.data.length;
1827         length = sg->length;
1828         if (is_encode(ses)) {
1829                 cpu_to_hw_sg(sg);
1830                 /* set auth output */
1831                 sg++;
1832                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1833                 sg->length = ses->digest_length;
1834                 length += sg->length;
1835         }
1836         sg->final = 1;
1837         cpu_to_hw_sg(sg);
1838
1839         /* output compound frame */
1840         cf->sg[0].length = length;
1841         cf->sg[0].extension = 1;
1842         cpu_to_hw_sg(&cf->sg[0]);
1843
1844         return cf;
1845 }
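
/*
 * Worked example for the chained cipher+auth case (numbers are
 * illustrative): with auth.data.offset = 0, auth.data.length = 100,
 * cipher.data.offset = 20 and cipher.data.length = 60, the enqueue
 * path below computes auth_hdr_len = 20 - 0 = 20 and
 * auth_tail_len = 100 - 60 - 20 = 20, i.e. 20 authenticated-only
 * bytes on each side of the encrypted region, conveyed to SEC via
 * fd->cmd (DPOVRD).
 */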
1846
1847 static inline struct dpaa_sec_job *
1848 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1849 {
1850         struct rte_crypto_sym_op *sym = op->sym;
1851         struct dpaa_sec_job *cf;
1852         struct dpaa_sec_op_ctx *ctx;
1853         struct qm_sg_entry *sg;
1854         phys_addr_t src_start_addr, dst_start_addr;
1855
1856         ctx = dpaa_sec_alloc_ctx(ses, 2);
1857         if (!ctx)
1858                 return NULL;
1859         cf = &ctx->job;
1860         ctx->op = op;
1861
1862         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1863
1864         if (sym->m_dst)
1865                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1866         else
1867                 dst_start_addr = src_start_addr;
1868
1869         /* input */
1870         sg = &cf->sg[1];
1871         qm_sg_entry_set64(sg, src_start_addr);
1872         sg->length = sym->m_src->pkt_len;
1873         sg->final = 1;
1874         cpu_to_hw_sg(sg);
1875
1876         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1877         /* output */
1878         sg = &cf->sg[0];
1879         qm_sg_entry_set64(sg, dst_start_addr);
1880         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1881         cpu_to_hw_sg(sg);
1882
1883         return cf;
1884 }
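
/*
 * For protocol offload (IPsec/PDCP) SEC consumes the whole packet
 * (pkt_len) and produces a transformed packet whose size is not known
 * in advance, so the output entry above exposes the full usable buffer
 * (buf_len - data_off) rather than the current data length; the
 * dequeue path then updates the mbuf length from the output SG entry
 * written back by hardware.
 */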
1885
1886 static inline struct dpaa_sec_job *
1887 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1888 {
1889         struct rte_crypto_sym_op *sym = op->sym;
1890         struct dpaa_sec_job *cf;
1891         struct dpaa_sec_op_ctx *ctx;
1892         struct qm_sg_entry *sg, *out_sg, *in_sg;
1893         struct rte_mbuf *mbuf;
1894         uint8_t req_segs;
1895         uint32_t in_len = 0, out_len = 0;
1896
1897         if (sym->m_dst)
1898                 mbuf = sym->m_dst;
1899         else
1900                 mbuf = sym->m_src;
1901
1902         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1903         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1904                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1905                                 MAX_SG_ENTRIES);
1906                 return NULL;
1907         }
1908
1909         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1910         if (!ctx)
1911                 return NULL;
1912         cf = &ctx->job;
1913         ctx->op = op;
1914         /* output */
1915         out_sg = &cf->sg[0];
1916         out_sg->extension = 1;
1917         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1918
1919         /* 1st seg */
1920         sg = &cf->sg[2];
1921         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1922         sg->offset = 0;
1923
1924         /* Successive segs */
1925         while (mbuf->next) {
1926                 sg->length = mbuf->data_len;
1927                 out_len += sg->length;
1928                 mbuf = mbuf->next;
1929                 cpu_to_hw_sg(sg);
1930                 sg++;
1931                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1932                 sg->offset = 0;
1933         }
1934         sg->length = mbuf->buf_len - mbuf->data_off;
1935         out_len += sg->length;
1936         sg->final = 1;
1937         cpu_to_hw_sg(sg);
1938
1939         out_sg->length = out_len;
1940         cpu_to_hw_sg(out_sg);
1941
1942         /* input */
1943         mbuf = sym->m_src;
1944         in_sg = &cf->sg[1];
1945         in_sg->extension = 1;
1946         in_sg->final = 1;
1947         in_len = mbuf->data_len;
1948
1949         sg++;
1950         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1951
1952         /* 1st seg */
1953         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1954         sg->length = mbuf->data_len;
1955         sg->offset = 0;
1956
1957         /* Successive segs */
1958         mbuf = mbuf->next;
1959         while (mbuf) {
1960                 cpu_to_hw_sg(sg);
1961                 sg++;
1962                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1963                 sg->length = mbuf->data_len;
1964                 sg->offset = 0;
1965                 in_len += sg->length;
1966                 mbuf = mbuf->next;
1967         }
1968         sg->final = 1;
1969         cpu_to_hw_sg(sg);
1970
1971         in_sg->length = in_len;
1972         cpu_to_hw_sg(in_sg);
1973
1974         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1975
1976         return cf;
1977 }
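
/*
 * Same idea as build_proto() for scattered packets: the input chain
 * totals pkt_len across all segments, while the last output segment
 * exposes its remaining buffer space so the protocol-processed packet
 * may grow beyond the original data length.
 */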
1978
1979 static uint16_t
1980 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1981                        uint16_t nb_ops)
1982 {
1983         /* Transmit the frames to the given device and queue pair */
1984         uint32_t loop;
1985         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1986         uint16_t num_tx = 0, nb_ops_in = nb_ops;
1987         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1988         uint32_t frames_to_send;
1989         struct rte_crypto_op *op;
1990         struct dpaa_sec_job *cf;
1991         dpaa_sec_session *ses;
1992         uint16_t auth_hdr_len, auth_tail_len;
1993         uint32_t index, flags[DPAA_SEC_BURST] = {0};
1994         struct qman_fq *inq[DPAA_SEC_BURST];
1995
1996         while (nb_ops) {
1997                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1998                                 DPAA_SEC_BURST : nb_ops;
1999                 for (loop = 0; loop < frames_to_send; loop++) {
2000                         op = *(ops++);
2001                         if (op->sym->m_src->seqn != 0) {
2002                                 index = op->sym->m_src->seqn - 1;
2003                                 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
2004                                         /* QM_EQCR_DCA_IDXMASK = 0x0f */
2005                                         flags[loop] = ((index & 0x0f) << 8);
2006                                         flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
2007                                         DPAA_PER_LCORE_DQRR_SIZE--;
2008                                         DPAA_PER_LCORE_DQRR_HELD &=
2009                                                                 ~(1 << index);
2010                                 }
2011                         }
2012
2013                         switch (op->sess_type) {
2014                         case RTE_CRYPTO_OP_WITH_SESSION:
2015                                 ses = (dpaa_sec_session *)
2016                                         get_sym_session_private_data(
2017                                                         op->sym->session,
2018                                                         cryptodev_driver_id);
2019                                 break;
2020                         case RTE_CRYPTO_OP_SECURITY_SESSION:
2021                                 ses = (dpaa_sec_session *)
2022                                         get_sec_session_private_data(
2023                                                         op->sym->sec_session);
2024                                 break;
2025                         default:
2026                                 DPAA_SEC_DP_ERR(
2027                                         "sessionless crypto op not supported");
2028                                 frames_to_send = loop;
2029                                 nb_ops = loop;
2030                                 goto send_pkts;
2031                         }
2032                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
2033                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
2034                                         frames_to_send = loop;
2035                                         nb_ops = loop;
2036                                         goto send_pkts;
2037                                 }
2038                         } else if (unlikely(ses->qp[rte_lcore_id() %
2039                                                 MAX_DPAA_CORES] != qp)) {
2040                                 DPAA_SEC_DP_ERR("Old sess->qp = %p,"
2041                                         " new qp = %p\n",
2042                                         ses->qp[rte_lcore_id() %
2043                                         MAX_DPAA_CORES], qp);
2044                                 frames_to_send = loop;
2045                                 nb_ops = loop;
2046                                 goto send_pkts;
2047                         }
2048
2049                         auth_hdr_len = op->sym->auth.data.length -
2050                                                 op->sym->cipher.data.length;
2051                         auth_tail_len = 0;
2052
2053                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
2054                                   ((op->sym->m_dst == NULL) ||
2055                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
2056                                 if (is_proto_ipsec(ses)) {
2057                                         cf = build_proto(op, ses);
2058                                 } else if (is_proto_pdcp(ses)) {
2059                                         cf = build_proto(op, ses);
2060                                 } else if (is_auth_only(ses)) {
2061                                         cf = build_auth_only(op, ses);
2062                                 } else if (is_cipher_only(ses)) {
2063                                         cf = build_cipher_only(op, ses);
2064                                 } else if (is_aead(ses)) {
2065                                         cf = build_cipher_auth_gcm(op, ses);
2066                                         auth_hdr_len = ses->auth_only_len;
2067                                 } else if (is_auth_cipher(ses)) {
2068                                         auth_hdr_len =
2069                                                 op->sym->cipher.data.offset
2070                                                 - op->sym->auth.data.offset;
2071                                         auth_tail_len =
2072                                                 op->sym->auth.data.length
2073                                                 - op->sym->cipher.data.length
2074                                                 - auth_hdr_len;
2075                                         cf = build_cipher_auth(op, ses);
2076                                 } else {
2077                                         DPAA_SEC_DP_ERR("operation not supported");
2078                                         frames_to_send = loop;
2079                                         nb_ops = loop;
2080                                         goto send_pkts;
2081                                 }
2082                         } else {
2083                                 if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
2084                                         cf = build_proto_sg(op, ses);
2085                                 } else if (is_auth_only(ses)) {
2086                                         cf = build_auth_only_sg(op, ses);
2087                                 } else if (is_cipher_only(ses)) {
2088                                         cf = build_cipher_only_sg(op, ses);
2089                                 } else if (is_aead(ses)) {
2090                                         cf = build_cipher_auth_gcm_sg(op, ses);
2091                                         auth_hdr_len = ses->auth_only_len;
2092                                 } else if (is_auth_cipher(ses)) {
2093                                         auth_hdr_len =
2094                                                 op->sym->cipher.data.offset
2095                                                 - op->sym->auth.data.offset;
2096                                         auth_tail_len =
2097                                                 op->sym->auth.data.length
2098                                                 - op->sym->cipher.data.length
2099                                                 - auth_hdr_len;
2100                                         cf = build_cipher_auth_sg(op, ses);
2101                                 } else {
2102                                         DPAA_SEC_DP_ERR("operation not supported");
2103                                         frames_to_send = loop;
2104                                         nb_ops = loop;
2105                                         goto send_pkts;
2106                                 }
2107                         }
2108                         if (unlikely(!cf)) {
2109                                 frames_to_send = loop;
2110                                 nb_ops = loop;
2111                                 goto send_pkts;
2112                         }
2113
2114                         fd = &fds[loop];
2115                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
2116                         fd->opaque_addr = 0;
2117                         fd->cmd = 0;
2118                         qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
2119                         fd->_format1 = qm_fd_compound;
2120                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
2121
2122                         /* auth_only_len is set to 0 in the shared descriptor
2123                          * and is overridden per packet here via fd.cmd, which
2124                          * updates the SEC DPOVRD register.
2125                          */
2126                         if (auth_hdr_len || auth_tail_len) {
2127                                 fd->cmd = 0x80000000;
2128                                 fd->cmd |=
2129                                         ((auth_tail_len << 16) | auth_hdr_len);
2130                         }
2131
2132                         /* For PDCP, the per-packet HFN override is stored in
2133                          * the mbuf private area, after the sym op.
2134                          */
2135                         if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
2136                                 fd->cmd = 0x80000000 |
2137                                         *((uint32_t *)((uint8_t *)op +
2138                                         ses->pdcp.hfn_ovd_offset));
2139                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
2140                                         *((uint32_t *)((uint8_t *)op +
2141                                         ses->pdcp.hfn_ovd_offset)),
2142                                         ses->pdcp.hfn_ovd,
2143                                         is_proto_pdcp(ses));
2144                         }
2145
2146                 }
2147 send_pkts:
2148                 loop = 0;
2149                 while (loop < frames_to_send) {
2150                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
2151                                         &flags[loop], frames_to_send - loop);
2152                 }
2153                 nb_ops -= frames_to_send;
2154                 num_tx += frames_to_send;
2155         }
2156
2157         dpaa_qp->tx_pkts += num_tx;
2158         dpaa_qp->tx_errs += nb_ops_in - num_tx; /* ops dropped before enqueue */
2159
2160         return num_tx;
2161 }
2162
2163 static uint16_t
2164 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
2165                        uint16_t nb_ops)
2166 {
2167         uint16_t num_rx;
2168         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
2169
2170         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
2171
2172         dpaa_qp->rx_pkts += num_rx;
2173         dpaa_qp->rx_errs += nb_ops - num_rx;
2174
2175         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
2176
2177         return num_rx;
2178 }
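
/*
 * Usage sketch from the application side (illustrative only; device,
 * queue pair and crypto op preparation omitted):
 *
 *   uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *                                               ops, nb_ops);
 *   ...
 *   uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *                                               deq_ops, sent);
 *
 * These generic calls resolve to dpaa_sec_enqueue_burst() and
 * dpaa_sec_dequeue_burst() above through the burst function pointers
 * this PMD registers at probe time.
 */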
2179
2180 /** Release queue pair */
2181 static int
2182 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
2183                             uint16_t qp_id)
2184 {
2185         struct dpaa_sec_dev_private *internals;
2186         struct dpaa_sec_qp *qp = NULL;
2187
2188         PMD_INIT_FUNC_TRACE();
2189
2190         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
2191
2192         internals = dev->data->dev_private;
2193         if (qp_id >= internals->max_nb_queue_pairs) {
2194                 DPAA_SEC_ERR("Invalid qp_id %u, max supported qpid is %u",
2195                              qp_id, internals->max_nb_queue_pairs);
2196                 return -EINVAL;
2197         }
2198
2199         qp = &internals->qps[qp_id];
2200         rte_mempool_free(qp->ctx_pool);
2201         qp->internals = NULL;
2202         dev->data->queue_pairs[qp_id] = NULL;
2203
2204         return 0;
2205 }
2206
2207 /** Setup a queue pair */
2208 static int
2209 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2210                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2211                 __rte_unused int socket_id)
2212 {
2213         struct dpaa_sec_dev_private *internals;
2214         struct dpaa_sec_qp *qp = NULL;
2215         char str[20];
2216
2217         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2218
2219         internals = dev->data->dev_private;
2220         if (qp_id >= internals->max_nb_queue_pairs) {
2221                 DPAA_SEC_ERR("Invalid qp_id %u, max supported qpid is %u",
2222                              qp_id, internals->max_nb_queue_pairs);
2223                 return -EINVAL;
2224         }
2225
2226         qp = &internals->qps[qp_id];
2227         qp->internals = internals;
2228         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2229                         dev->data->dev_id, qp_id);
2230         if (!qp->ctx_pool) {
2231                 qp->ctx_pool = rte_mempool_create((const char *)str,
2232                                                         CTX_POOL_NUM_BUFS,
2233                                                         CTX_POOL_BUF_SIZE,
2234                                                         CTX_POOL_CACHE_SIZE, 0,
2235                                                         NULL, NULL, NULL, NULL,
2236                                                         SOCKET_ID_ANY, 0);
2237                 if (!qp->ctx_pool) {
2238                         DPAA_SEC_ERR("%s create failed\n", str);
2239                         return -ENOMEM;
2240                 }
2241         } else
2242                 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2243                                 dev->data->dev_id, qp_id);
2244         dev->data->queue_pairs[qp_id] = qp;
2245
2246         return 0;
2247 }
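
/*
 * Minimal setup sketch (illustrative; error handling omitted). Note
 * that qp_conf and socket_id are accepted but ignored by this PMD,
 * as flagged by __rte_unused above:
 *
 *   struct rte_cryptodev_qp_conf qp_conf = {
 *           .nb_descriptors = 2048,
 *   };
 *   ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *                                        rte_socket_id());
 */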
2248
2249 /** Return the number of allocated queue pairs */
2250 static uint32_t
2251 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
2252 {
2253         PMD_INIT_FUNC_TRACE();
2254
2255         return dev->data->nb_queue_pairs;
2256 }
2257
2258 /** Returns the size of session structure */
2259 static unsigned int
2260 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2261 {
2262         PMD_INIT_FUNC_TRACE();
2263
2264         return sizeof(dpaa_sec_session);
2265 }
2266
2267 static int
2268 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2269                      struct rte_crypto_sym_xform *xform,
2270                      dpaa_sec_session *session)
2271 {
2272         session->cipher_alg = xform->cipher.algo;
2273         session->iv.length = xform->cipher.iv.length;
2274         session->iv.offset = xform->cipher.iv.offset;
2275         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2276                                                RTE_CACHE_LINE_SIZE);
2277         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2278                 DPAA_SEC_ERR("No Memory for cipher key");
2279                 return -ENOMEM;
2280         }
2281         session->cipher_key.length = xform->cipher.key.length;
2282
2283         memcpy(session->cipher_key.data, xform->cipher.key.data,
2284                xform->cipher.key.length);
2285         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2286                         DIR_ENC : DIR_DEC;
2287
2288         return 0;
2289 }
2290
2291 static int
2292 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2293                    struct rte_crypto_sym_xform *xform,
2294                    dpaa_sec_session *session)
2295 {
2296         session->auth_alg = xform->auth.algo;
2297         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2298                                              RTE_CACHE_LINE_SIZE);
2299         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2300                 DPAA_SEC_ERR("No Memory for auth key");
2301                 return -ENOMEM;
2302         }
2303         session->auth_key.length = xform->auth.key.length;
2304         session->digest_length = xform->auth.digest_length;
2305         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2306                 session->iv.offset = xform->auth.iv.offset;
2307                 session->iv.length = xform->auth.iv.length;
2308         }
2309
2310         memcpy(session->auth_key.data, xform->auth.key.data,
2311                xform->auth.key.length);
2312         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2313                         DIR_ENC : DIR_DEC;
2314
2315         return 0;
2316 }
2317
2318 static int
2319 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2320                    struct rte_crypto_sym_xform *xform,
2321                    dpaa_sec_session *session)
2322 {
2323         session->aead_alg = xform->aead.algo;
2324         session->iv.length = xform->aead.iv.length;
2325         session->iv.offset = xform->aead.iv.offset;
2326         session->auth_only_len = xform->aead.aad_length;
2327         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2328                                              RTE_CACHE_LINE_SIZE);
2329         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2330                 DPAA_SEC_ERR("No Memory for aead key");
2331                 return -ENOMEM;
2332         }
2333         session->aead_key.length = xform->aead.key.length;
2334         session->digest_length = xform->aead.digest_length;
2335
2336         memcpy(session->aead_key.data, xform->aead.key.data,
2337                xform->aead.key.length);
2338         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2339                         DIR_ENC : DIR_DEC;
2340
2341         return 0;
2342 }
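
/*
 * Illustrative AEAD xform that would exercise dpaa_sec_aead_init()
 * (key_data and IV_OFFSET are placeholders; all values are example
 * assumptions, not PMD requirements):
 *
 *   struct rte_crypto_sym_xform xform = {
 *           .type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *           .aead = {
 *                   .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *                   .algo = RTE_CRYPTO_AEAD_AES_GCM,
 *                   .key = { .data = key_data, .length = 16 },
 *                   .iv = { .offset = IV_OFFSET, .length = 12 },
 *                   .digest_length = 16,
 *                   .aad_length = 16,
 *           },
 *   };
 */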
2343
2344 static struct qman_fq *
2345 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2346 {
2347         unsigned int i;
2348
2349         for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2350                 if (qi->inq_attach[i] == 0) {
2351                         qi->inq_attach[i] = 1;
2352                         return &qi->inq[i];
2353                 }
2354         }
2355         DPAA_SEC_WARN("All sessions in use (%u)", qi->max_nb_sessions);
2356
2357         return NULL;
2358 }
2359
2360 static int
2361 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2362 {
2363         unsigned int i;
2364
2365         for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2366                 if (&qi->inq[i] == fq) {
2367                         qman_retire_fq(fq, NULL);
2368                         qman_oos_fq(fq);
2369                         qi->inq_attach[i] = 0;
2370                         return 0;
2371                 }
2372         }
2373         return -1;
2374 }
2375
2376 static int
2377 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2378 {
2379         int ret;
2380
2381         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2382         ret = dpaa_sec_prep_cdb(sess);
2383         if (ret) {
2384                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2385                 return -1;
2386         }
2387         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2388                 ret = rte_dpaa_portal_init((void *)0);
2389                 if (ret) {
2390                         DPAA_SEC_ERR("Failure in affining portal");
2391                         return ret;
2392                 }
2393         }
2394         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2395                                dpaa_mem_vtop(&sess->cdb),
2396                                qman_fq_fqid(&qp->outq));
2397         if (ret)
2398                 DPAA_SEC_ERR("Unable to init sec queue");
2399
2400         return ret;
2401 }
2402
2403 static int
2404 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2405                             struct rte_crypto_sym_xform *xform, void *sess)
2406 {
2407         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2408         dpaa_sec_session *session = sess;
2409         uint32_t i;
2410
2411         PMD_INIT_FUNC_TRACE();
2412
2413         if (unlikely(sess == NULL)) {
2414                 DPAA_SEC_ERR("invalid session struct");
2415                 return -EINVAL;
2416         }
2417         memset(session, 0, sizeof(dpaa_sec_session));
2418
2419         /* Default IV length = 0 */
2420         session->iv.length = 0;
2421
2422         /* Cipher Only */
2423         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2424                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2425                 dpaa_sec_cipher_init(dev, xform, session);
2426
2427         /* Authentication Only */
2428         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2429                    xform->next == NULL) {
2430                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2431                 dpaa_sec_auth_init(dev, xform, session);
2432
2433         /* Cipher then Authenticate */
2434         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2435                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2436                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2437                         dpaa_sec_cipher_init(dev, xform, session);
2438                         dpaa_sec_auth_init(dev, xform->next, session);
2439                 } else {
2440                         DPAA_SEC_ERR("Not supported: Cipher (decrypt) then Auth");
2441                         return -EINVAL;
2442                 }
2443
2444         /* Authenticate then Cipher */
2445         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2446                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2447                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2448                         dpaa_sec_auth_init(dev, xform, session);
2449                         dpaa_sec_cipher_init(dev, xform->next, session);
2450                 } else {
2451                         DPAA_SEC_ERR("Not supported: Auth then Cipher (encrypt)");
2452                         return -EINVAL;
2453                 }
2454
2455         /* AEAD operation for AES-GCM kind of Algorithms */
2456         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2457                    xform->next == NULL) {
2458                 dpaa_sec_aead_init(dev, xform, session);
2459
2460         } else {
2461                 DPAA_SEC_ERR("Invalid crypto type");
2462                 return -EINVAL;
2463         }
2464         rte_spinlock_lock(&internals->lock);
2465         for (i = 0; i < MAX_DPAA_CORES; i++) {
2466                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2467                 if (session->inq[i] == NULL) {
2468                         DPAA_SEC_ERR("unable to attach sec queue");
2469                         rte_spinlock_unlock(&internals->lock);
2470                         goto err1;
2471                 }
2472         }
2473         rte_spinlock_unlock(&internals->lock);
2474
2475         return 0;
2476
2477 err1:
2478         rte_free(session->cipher_key.data);
2479         rte_free(session->auth_key.data);
2480         memset(session, 0, sizeof(dpaa_sec_session));
2481
2482         return -EINVAL;
2483 }
2484
2485 static int
2486 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2487                 struct rte_crypto_sym_xform *xform,
2488                 struct rte_cryptodev_sym_session *sess,
2489                 struct rte_mempool *mempool)
2490 {
2491         void *sess_private_data;
2492         int ret;
2493
2494         PMD_INIT_FUNC_TRACE();
2495
2496         if (rte_mempool_get(mempool, &sess_private_data)) {
2497                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2498                 return -ENOMEM;
2499         }
2500
2501         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2502         if (ret != 0) {
2503                 DPAA_SEC_ERR("failed to configure session parameters");
2504
2505                 /* Return session to mempool */
2506                 rte_mempool_put(mempool, sess_private_data);
2507                 return ret;
2508         }
2509
2510         set_sym_session_private_data(sess, dev->driver_id,
2511                         sess_private_data);
2512
2513
2514         return 0;
2515 }
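
/*
 * Typical call sequence from an application (sketch; mempool creation
 * and error checks omitted):
 *
 *   struct rte_cryptodev_sym_session *sess =
 *           rte_cryptodev_sym_session_create(sess_mp);
 *   rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp);
 *
 * The init call lands in dpaa_sec_sym_session_configure() above, with
 * priv_mp backing the driver-private session data fetched via
 * rte_mempool_get().
 */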
2516
2517 static inline void
2518 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2519 {
2520         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2521         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2522         uint8_t i;
2523
2524         for (i = 0; i < MAX_DPAA_CORES; i++) {
2525                 if (s->inq[i])
2526                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2527                 s->inq[i] = NULL;
2528                 s->qp[i] = NULL;
2529         }
2530         rte_free(s->cipher_key.data);
2531         rte_free(s->auth_key.data);
2532         memset(s, 0, sizeof(dpaa_sec_session));
2533         rte_mempool_put(sess_mp, (void *)s);
2534 }
2535
2536 /** Clear the memory of session so it doesn't leave key material behind */
2537 static void
2538 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2539                 struct rte_cryptodev_sym_session *sess)
2540 {
2541         PMD_INIT_FUNC_TRACE();
2542         uint8_t index = dev->driver_id;
2543         void *sess_priv = get_sym_session_private_data(sess, index);
2544         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2545
2546         if (sess_priv) {
2547                 free_session_memory(dev, s);
2548                 set_sym_session_private_data(sess, index, NULL);
2549         }
2550 }
2551
2552 static int
2553 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2554                            struct rte_security_session_conf *conf,
2555                            void *sess)
2556 {
2557         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2558         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2559         struct rte_crypto_auth_xform *auth_xform = NULL;
2560         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2561         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2562         uint32_t i;
2563
2564         PMD_INIT_FUNC_TRACE();
2565
2566         memset(session, 0, sizeof(dpaa_sec_session));
2567         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2568                 cipher_xform = &conf->crypto_xform->cipher;
2569                 if (conf->crypto_xform->next)
2570                         auth_xform = &conf->crypto_xform->next->auth;
2571         } else {
2572                 auth_xform = &conf->crypto_xform->auth;
2573                 if (conf->crypto_xform->next)
2574                         cipher_xform = &conf->crypto_xform->next->cipher;
2575         }
2576         session->proto_alg = conf->protocol;
2577
2578         if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2579                 session->cipher_key.data = rte_zmalloc(NULL,
2580                                                        cipher_xform->key.length,
2581                                                        RTE_CACHE_LINE_SIZE);
2582                 if (session->cipher_key.data == NULL &&
2583                                 cipher_xform->key.length > 0) {
2584                         DPAA_SEC_ERR("No Memory for cipher key");
2585                         return -ENOMEM;
2586                 }
2587                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2588                                 cipher_xform->key.length);
2589                 session->cipher_key.length = cipher_xform->key.length;
2590
2591                 switch (cipher_xform->algo) {
2592                 case RTE_CRYPTO_CIPHER_AES_CBC:
2593                 case RTE_CRYPTO_CIPHER_3DES_CBC:
2594                 case RTE_CRYPTO_CIPHER_AES_CTR:
2595                         break;
2596                 default:
2597                         DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2598                                 cipher_xform->algo);
2599                         goto out;
2600                 }
2601                 session->cipher_alg = cipher_xform->algo;
2602         } else {
2603                 session->cipher_key.data = NULL;
2604                 session->cipher_key.length = 0;
2605                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2606         }
2607
2608         if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2609                 session->auth_key.data = rte_zmalloc(NULL,
2610                                                 auth_xform->key.length,
2611                                                 RTE_CACHE_LINE_SIZE);
2612                 if (session->auth_key.data == NULL &&
2613                                 auth_xform->key.length > 0) {
2614                         DPAA_SEC_ERR("No Memory for auth key");
2615                         rte_free(session->cipher_key.data);
2616                         return -ENOMEM;
2617                 }
2618                 memcpy(session->auth_key.data, auth_xform->key.data,
2619                                 auth_xform->key.length);
2620                 session->auth_key.length = auth_xform->key.length;
2621
2622                 switch (auth_xform->algo) {
2623                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2624                 case RTE_CRYPTO_AUTH_MD5_HMAC:
2625                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2626                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2627                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2628                 case RTE_CRYPTO_AUTH_AES_CMAC:
2629                         break;
2630                 default:
2631                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2632                                 auth_xform->algo);
2633                         goto out;
2634                 }
2635                 session->auth_alg = auth_xform->algo;
2636         } else {
2637                 session->auth_key.data = NULL;
2638                 session->auth_key.length = 0;
2639                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2640         }
2641
2642         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2643                 if (ipsec_xform->tunnel.type ==
2644                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2645                         memset(&session->encap_pdb, 0,
2646                                 sizeof(struct ipsec_encap_pdb) +
2647                                 sizeof(session->ip4_hdr));
2648                         session->ip4_hdr.ip_v = IPVERSION;
2649                         session->ip4_hdr.ip_hl = 5;
2650                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2651                                                 sizeof(session->ip4_hdr));
2652                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2653                         session->ip4_hdr.ip_id = 0;
2654                         session->ip4_hdr.ip_off = 0;
2655                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2656                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2657                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2658                                         IPPROTO_ESP : IPPROTO_AH;
2659                         session->ip4_hdr.ip_sum = 0;
2660                         session->ip4_hdr.ip_src =
2661                                         ipsec_xform->tunnel.ipv4.src_ip;
2662                         session->ip4_hdr.ip_dst =
2663                                         ipsec_xform->tunnel.ipv4.dst_ip;
2664                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2665                                                 (void *)&session->ip4_hdr,
2666                                                 sizeof(struct ip));
2667                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2668                 } else if (ipsec_xform->tunnel.type ==
2669                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2670                         memset(&session->encap_pdb, 0,
2671                                 sizeof(struct ipsec_encap_pdb) +
2672                                 sizeof(session->ip6_hdr));
2673                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2674                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2675                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2676                                         RTE_IPV6_HDR_TC_SHIFT) &
2677                                         RTE_IPV6_HDR_TC_MASK) |
2678                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2679                                         RTE_IPV6_HDR_FL_SHIFT) &
2680                                         RTE_IPV6_HDR_FL_MASK));
2681                         /* Payload length will be updated by HW */
2682                         session->ip6_hdr.payload_len = 0;
2683                         session->ip6_hdr.hop_limits =
2684                                         ipsec_xform->tunnel.ipv6.hlimit;
2685                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2686                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2687                                         IPPROTO_ESP : IPPROTO_AH;
2688                         memcpy(&session->ip6_hdr.src_addr,
2689                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2690                         memcpy(&session->ip6_hdr.dst_addr,
2691                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2692                         session->encap_pdb.ip_hdr_len =
2693                                                 sizeof(struct rte_ipv6_hdr);
2694                 }
2695                 session->encap_pdb.options =
2696                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2697                         PDBOPTS_ESP_OIHI_PDB_INL |
2698                         PDBOPTS_ESP_IVSRC |
2699                         PDBHMO_ESP_ENCAP_DTTL |
2700                         PDBHMO_ESP_SNR;
2701                 if (ipsec_xform->options.esn)
2702                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2703                 session->encap_pdb.spi = ipsec_xform->spi;
2704                 session->dir = DIR_ENC;
2705         } else if (ipsec_xform->direction ==
2706                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2707                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2708                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2709                         session->decap_pdb.options = sizeof(struct ip) << 16;
2710                 else
2711                         session->decap_pdb.options =
2712                                         sizeof(struct rte_ipv6_hdr) << 16;
2713                 if (ipsec_xform->options.esn)
2714                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2715                 session->dir = DIR_DEC;
        } else {
                goto out;
        }
2718         rte_spinlock_lock(&internals->lock);
2719         for (i = 0; i < MAX_DPAA_CORES; i++) {
2720                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2721                 if (session->inq[i] == NULL) {
2722                         DPAA_SEC_ERR("unable to attach sec queue");
2723                         rte_spinlock_unlock(&internals->lock);
2724                         goto out;
2725                 }
2726         }
2727         rte_spinlock_unlock(&internals->lock);
2728
2729         return 0;
2730 out:
2731         rte_free(session->auth_key.data);
2732         rte_free(session->cipher_key.data);
2733         memset(session, 0, sizeof(dpaa_sec_session));
2734         return -1;
2735 }
2736
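/*
 * Illustrative sketch (not part of the driver): the application-side
 * conf that dpaa_sec_set_ipsec_session() above consumes. The SPI value
 * and the "ipsec_crypto_xform" chain are hypothetical; the field and
 * enum names come from the rte_security API.
 *
 *      struct rte_security_session_conf conf = {
 *              .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *              .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *              .ipsec = {
 *                      .spi = 0x1000,
 *                      .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *                      .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *                      .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *                      .tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *                      .options.esn = 0,
 *              },
 *              .crypto_xform = &ipsec_crypto_xform,
 *      };
 */
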
2737 static int
2738 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2739                           struct rte_security_session_conf *conf,
2740                           void *sess)
2741 {
2742         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2743         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2744         struct rte_crypto_auth_xform *auth_xform = NULL;
2745         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2746         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2747         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2748         uint32_t i;
2749
2750         PMD_INIT_FUNC_TRACE();
2751
2752         memset(session, 0, sizeof(dpaa_sec_session));
2753
2754         /* find xfrm types */
2755         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2756                 cipher_xform = &xform->cipher;
2757                 if (xform->next != NULL)
2758                         auth_xform = &xform->next->auth;
2759         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2760                 auth_xform = &xform->auth;
2761                 if (xform->next != NULL)
2762                         cipher_xform = &xform->next->cipher;
2763         } else {
2764                 DPAA_SEC_ERR("Invalid crypto type");
2765                 return -EINVAL;
2766         }
2767
2768         session->proto_alg = conf->protocol;
2769         if (cipher_xform) {
2770                 session->cipher_key.data = rte_zmalloc(NULL,
2771                                                cipher_xform->key.length,
2772                                                RTE_CACHE_LINE_SIZE);
2773                 if (session->cipher_key.data == NULL &&
2774                                 cipher_xform->key.length > 0) {
2775                         DPAA_SEC_ERR("No Memory for cipher key");
2776                         return -ENOMEM;
2777                 }
2778                 session->cipher_key.length = cipher_xform->key.length;
2779                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2780                         cipher_xform->key.length);
2781                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2782                                         DIR_ENC : DIR_DEC;
2783                 session->cipher_alg = cipher_xform->algo;
2784         } else {
2785                 session->cipher_key.data = NULL;
2786                 session->cipher_key.length = 0;
2787                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2788                 session->dir = DIR_ENC;
2789         }
2790
2791         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2792                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2793                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2794                         DPAA_SEC_ERR(
2795                                 "PDCP Seq Num size should be 5/12 bits for cmode");
2796                         goto out;
2797                 }
2798         }
2799
2800         if (auth_xform) {
2801                 session->auth_key.data = rte_zmalloc(NULL,
2802                                                      auth_xform->key.length,
2803                                                      RTE_CACHE_LINE_SIZE);
2804                 if (!session->auth_key.data &&
2805                     auth_xform->key.length > 0) {
2806                         DPAA_SEC_ERR("No Memory for auth key");
2807                         rte_free(session->cipher_key.data);
2808                         return -ENOMEM;
2809                 }
2810                 session->auth_key.length = auth_xform->key.length;
2811                 memcpy(session->auth_key.data, auth_xform->key.data,
2812                        auth_xform->key.length);
2813                 session->auth_alg = auth_xform->algo;
2814         } else {
2815                 session->auth_key.data = NULL;
2816                 session->auth_key.length = 0;
2817                 session->auth_alg = 0;
2818         }
2819         session->pdcp.domain = pdcp_xform->domain;
2820         session->pdcp.bearer = pdcp_xform->bearer;
2821         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2822         session->pdcp.sn_size = pdcp_xform->sn_size;
2823         session->pdcp.hfn = pdcp_xform->hfn;
2824         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2825         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
        /* cipher_xform is NULL for NULL-cipher (auth-only) sessions */
        if (cipher_xform)
                session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2827
2828         rte_spinlock_lock(&dev_priv->lock);
2829         for (i = 0; i < MAX_DPAA_CORES; i++) {
2830                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2831                 if (session->inq[i] == NULL) {
2832                         DPAA_SEC_ERR("unable to attach sec queue");
2833                         rte_spinlock_unlock(&dev_priv->lock);
2834                         goto out;
2835                 }
2836         }
2837         rte_spinlock_unlock(&dev_priv->lock);
2838         return 0;
2839 out:
2840         rte_free(session->auth_key.data);
2841         rte_free(session->cipher_key.data);
2842         memset(session, 0, sizeof(dpaa_sec_session));
2843         return -1;
2844 }
2845
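/*
 * Illustrative sketch (not part of the driver): a control-plane PDCP
 * conf as consumed by dpaa_sec_set_pdcp_session() above. The bearer and
 * HFN values and the "pdcp_crypto_xform" chain are hypothetical; field
 * and enum names come from the rte_security API.
 *
 *      struct rte_security_session_conf conf = {
 *              .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *              .protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *              .pdcp = {
 *                      .domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *                      .bearer = 0x3,
 *                      .pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *                      .sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
 *                      .hfn = 0x1,
 *                      .hfn_threshold = 0x70c0a,
 *              },
 *              .crypto_xform = &pdcp_crypto_xform,
 *      };
 */
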
2846 static int
2847 dpaa_sec_security_session_create(void *dev,
2848                                  struct rte_security_session_conf *conf,
2849                                  struct rte_security_session *sess,
2850                                  struct rte_mempool *mempool)
2851 {
2852         void *sess_private_data;
2853         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2854         int ret;
2855
2856         if (rte_mempool_get(mempool, &sess_private_data)) {
2857                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2858                 return -ENOMEM;
2859         }
2860
2861         switch (conf->protocol) {
2862         case RTE_SECURITY_PROTOCOL_IPSEC:
2863                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2864                                 sess_private_data);
2865                 break;
2866         case RTE_SECURITY_PROTOCOL_PDCP:
2867                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
2868                                 sess_private_data);
2869                 break;
        /* For unsupported or unknown protocols, set ret and break so
         * the error path below returns the object to the mempool.
         */
        case RTE_SECURITY_PROTOCOL_MACSEC:
                ret = -ENOTSUP;
                break;
        default:
                ret = -EINVAL;
                break;
        }
2875         if (ret != 0) {
2876                 DPAA_SEC_ERR("failed to configure session parameters");
2877                 /* Return session to mempool */
2878                 rte_mempool_put(mempool, sess_private_data);
2879                 return ret;
2880         }
2881
2882         set_sec_session_private_data(sess, sess_private_data);
2883
2884         return ret;
2885 }
2886
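/*
 * Illustrative usage sketch (application-side, hedged): sessions reach
 * the create/destroy callbacks below through the generic rte_security
 * layer. "dev_id", "conf" and "sess_mp" are application-side names.
 *
 *      struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *      struct rte_security_session *sess =
 *              rte_security_session_create(ctx, &conf, sess_mp);
 *
 *      if (sess == NULL)
 *              handle_error();
 *      ...
 *      rte_security_session_destroy(ctx, sess);
 */
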
/** Clear the session memory so that it does not leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
                struct rte_security_session *sess)
{
        void *sess_priv = get_sec_session_private_data(sess);
        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

        PMD_INIT_FUNC_TRACE();
2895
2896         if (sess_priv) {
2897                 free_session_memory((struct rte_cryptodev *)dev, s);
2898                 set_sec_session_private_data(sess, NULL);
2899         }
2900         return 0;
2901 }
2902
2903 static int
2904 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2905                        struct rte_cryptodev_config *config __rte_unused)
2906 {
2907         PMD_INIT_FUNC_TRACE();
2908
2909         return 0;
2910 }
2911
2912 static int
2913 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2914 {
2915         PMD_INIT_FUNC_TRACE();
2916         return 0;
2917 }
2918
2919 static void
2920 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2921 {
2922         PMD_INIT_FUNC_TRACE();
2923 }
2924
2925 static int
2926 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2927 {
2928         PMD_INIT_FUNC_TRACE();
2929
        if (dev == NULL)
                return -ENODEV;
2932
2933         return 0;
2934 }
2935
2936 static void
2937 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2938                        struct rte_cryptodev_info *info)
2939 {
2940         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2941
2942         PMD_INIT_FUNC_TRACE();
2943         if (info != NULL) {
2944                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2945                 info->feature_flags = dev->feature_flags;
2946                 info->capabilities = dpaa_sec_capabilities;
2947                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2948                 info->driver_id = cryptodev_driver_id;
2949         }
2950 }
2951
2952 static enum qman_cb_dqrr_result
2953 dpaa_sec_process_parallel_event(void *event,
2954                         struct qman_portal *qm __always_unused,
2955                         struct qman_fq *outq,
2956                         const struct qm_dqrr_entry *dqrr,
2957                         void **bufs)
2958 {
2959         const struct qm_fd *fd;
2960         struct dpaa_sec_job *job;
2961         struct dpaa_sec_op_ctx *ctx;
2962         struct rte_event *ev = (struct rte_event *)event;
2963
2964         fd = &dqrr->fd;
2965
        /* sg is embedded in an op ctx:
         * sg[0] is for output,
         * sg[1] is for input
         */
2970         job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
2971
2972         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
2973         ctx->fd_status = fd->status;
2974         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
2975                 struct qm_sg_entry *sg_out;
2976                 uint32_t len;
2977
2978                 sg_out = &job->sg[0];
2979                 hw_sg_to_cpu(sg_out);
2980                 len = sg_out->length;
2981                 ctx->op->sym->m_src->pkt_len = len;
2982                 ctx->op->sym->m_src->data_len = len;
2983         }
2984         if (!ctx->fd_status) {
2985                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2986         } else {
2987                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
2988                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2989         }
2990         ev->event_ptr = (void *)ctx->op;
2991
2992         ev->flow_id = outq->ev.flow_id;
2993         ev->sub_event_type = outq->ev.sub_event_type;
2994         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
2995         ev->op = RTE_EVENT_OP_NEW;
2996         ev->sched_type = outq->ev.sched_type;
2997         ev->queue_id = outq->ev.queue_id;
2998         ev->priority = outq->ev.priority;
2999         *bufs = (void *)ctx->op;
3000
3001         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3002
3003         return qman_cb_dqrr_consume;
3004 }
3005
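/*
 * Illustrative sketch (application-side, hedged): completions pushed by
 * the callback above surface as crypto events on an event port.
 * "evdev_id" and "port_id" are application-side names.
 *
 *      struct rte_event ev;
 *      struct rte_crypto_op *op;
 *
 *      if (rte_event_dequeue_burst(evdev_id, port_id, &ev, 1, 0) &&
 *                      ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)
 *              op = ev.event_ptr;  (op->status holds the SEC result)
 */
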
3006 static enum qman_cb_dqrr_result
3007 dpaa_sec_process_atomic_event(void *event,
3008                         struct qman_portal *qm __rte_unused,
3009                         struct qman_fq *outq,
3010                         const struct qm_dqrr_entry *dqrr,
3011                         void **bufs)
3012 {
3013         u8 index;
3014         const struct qm_fd *fd;
3015         struct dpaa_sec_job *job;
3016         struct dpaa_sec_op_ctx *ctx;
3017         struct rte_event *ev = (struct rte_event *)event;
3018
3019         fd = &dqrr->fd;
3020
        /* sg is embedded in an op ctx:
         * sg[0] is for output,
         * sg[1] is for input
         */
3025         job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
3026
3027         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3028         ctx->fd_status = fd->status;
3029         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3030                 struct qm_sg_entry *sg_out;
3031                 uint32_t len;
3032
3033                 sg_out = &job->sg[0];
3034                 hw_sg_to_cpu(sg_out);
3035                 len = sg_out->length;
3036                 ctx->op->sym->m_src->pkt_len = len;
3037                 ctx->op->sym->m_src->data_len = len;
3038         }
3039         if (!ctx->fd_status) {
3040                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3041         } else {
3042                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3043                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3044         }
3045         ev->event_ptr = (void *)ctx->op;
3046         ev->flow_id = outq->ev.flow_id;
3047         ev->sub_event_type = outq->ev.sub_event_type;
3048         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3049         ev->op = RTE_EVENT_OP_NEW;
3050         ev->sched_type = outq->ev.sched_type;
3051         ev->queue_id = outq->ev.queue_id;
3052         ev->priority = outq->ev.priority;
3053
        /* Save the active DQRR entry: entries are 64 B apart in the ring,
         * so bits [6..] of the entry address give its ring index.
         */
        index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3056         DPAA_PER_LCORE_DQRR_SIZE++;
3057         DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3058         DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3059         ev->impl_opaque = index + 1;
3060         ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
3061         *bufs = (void *)ctx->op;
3062
3063         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3064
3065         return qman_cb_dqrr_defer;
3066 }
3067
3068 int
3069 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3070                 int qp_id,
3071                 uint16_t ch_id,
3072                 const struct rte_event *event)
3073 {
3074         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3075         struct qm_mcc_initfq opts = {0};
3076
3077         int ret;
3078
3079         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3080                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3081         opts.fqd.dest.channel = ch_id;
3082
3083         switch (event->sched_type) {
3084         case RTE_SCHED_TYPE_ATOMIC:
3085                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
                /* Clear the FQCTRL_AVOIDBLOCK bit: it is not needed when
                 * HOLD_ACTIVE is set
                 */
                opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3090                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3091                 break;
        case RTE_SCHED_TYPE_ORDERED:
                DPAA_SEC_ERR("Ordered queue schedule type is not supported");
                return -ENOTSUP;
3095         default:
3096                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3097                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3098                 break;
3099         }
3100
3101         ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3102         if (unlikely(ret)) {
3103                 DPAA_SEC_ERR("unable to init caam source fq!");
3104                 return ret;
3105         }
3106
3107         memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3108
3109         return 0;
3110 }
3111
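/*
 * Illustrative sketch (caller-side, hedged): attaching a SEC queue pair
 * with parallel scheduling, as an event adapter might do. "cryptodev",
 * "qp_id" and "ch_id" are caller-side names.
 *
 *      struct rte_event ev = {
 *              .queue_id = 0,
 *              .sched_type = RTE_SCHED_TYPE_PARALLEL,
 *              .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *      };
 *
 *      if (dpaa_sec_eventq_attach(cryptodev, qp_id, ch_id, &ev) != 0)
 *              handle_error();
 */
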
3112 int
3113 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3114                         int qp_id)
3115 {
3116         struct qm_mcc_initfq opts = {0};
3117         int ret;
3118         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3119
3120         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3121                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3122         qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3123         qp->outq.cb.ern  = ern_sec_fq_handler;
3124         qman_retire_fq(&qp->outq, NULL);
3125         qman_oos_fq(&qp->outq);
3126         ret = qman_init_fq(&qp->outq, 0, &opts);
        if (ret)
                DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
3129         qp->outq.cb.dqrr = NULL;
3130
3131         return ret;
3132 }
3133
3134 static struct rte_cryptodev_ops crypto_ops = {
3135         .dev_configure        = dpaa_sec_dev_configure,
3136         .dev_start            = dpaa_sec_dev_start,
3137         .dev_stop             = dpaa_sec_dev_stop,
3138         .dev_close            = dpaa_sec_dev_close,
3139         .dev_infos_get        = dpaa_sec_dev_infos_get,
3140         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
3141         .queue_pair_release   = dpaa_sec_queue_pair_release,
3142         .queue_pair_count     = dpaa_sec_queue_pair_count,
3143         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
3144         .sym_session_configure    = dpaa_sec_sym_session_configure,
3145         .sym_session_clear        = dpaa_sec_sym_session_clear
3146 };
3147
3148 static const struct rte_security_capability *
3149 dpaa_sec_capabilities_get(void *device __rte_unused)
3150 {
3151         return dpaa_sec_security_cap;
3152 }
3153
3154 static const struct rte_security_ops dpaa_sec_security_ops = {
3155         .session_create = dpaa_sec_security_session_create,
3156         .session_update = NULL,
3157         .session_stats_get = NULL,
3158         .session_destroy = dpaa_sec_security_session_destroy,
3159         .set_pkt_metadata = NULL,
3160         .capabilities_get = dpaa_sec_capabilities_get
3161 };
3162
3163 static int
3164 dpaa_sec_uninit(struct rte_cryptodev *dev)
3165 {
3166         struct dpaa_sec_dev_private *internals;
3167
3168         if (dev == NULL)
3169                 return -ENODEV;
3170
3171         internals = dev->data->dev_private;
3172         rte_free(dev->security_ctx);
3173
3174         rte_free(internals);
3175
3176         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3177                       dev->data->name, rte_socket_id());
3178
3179         return 0;
3180 }
3181
3182 static int
3183 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3184 {
3185         struct dpaa_sec_dev_private *internals;
3186         struct rte_security_ctx *security_instance;
3187         struct dpaa_sec_qp *qp;
3188         uint32_t i, flags;
3189         int ret;
3190
3191         PMD_INIT_FUNC_TRACE();
3192
3193         cryptodev->driver_id = cryptodev_driver_id;
3194         cryptodev->dev_ops = &crypto_ops;
3195
3196         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3197         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3198         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3199                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3200                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3201                         RTE_CRYPTODEV_FF_SECURITY |
3202                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3203                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3204                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3205                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3206                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3207
3208         internals = cryptodev->data->dev_private;
3209         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3210         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3211
        /*
         * For secondary processes, don't initialise any further, as the
         * primary process has already done this work.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DPAA_SEC_WARN("Device already initialized by primary process");
                return 0;
        }
3221
        /* Initialize security_ctx only for the primary process */
3223         security_instance = rte_malloc("rte_security_instances_ops",
3224                                 sizeof(struct rte_security_ctx), 0);
3225         if (security_instance == NULL)
3226                 return -ENOMEM;
3227         security_instance->device = (void *)cryptodev;
3228         security_instance->ops = &dpaa_sec_security_ops;
3229         security_instance->sess_cnt = 0;
3230         cryptodev->security_ctx = security_instance;
3231
3232         rte_spinlock_init(&internals->lock);
3233         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3234                 /* init qman fq for queue pair */
3235                 qp = &internals->qps[i];
3236                 ret = dpaa_sec_init_tx(&qp->outq);
3237                 if (ret) {
                        DPAA_SEC_ERR("failed to configure tx of queue pair %d",
                                     i);
3239                         goto init_error;
3240                 }
3241         }
3242
3243         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3244                 QMAN_FQ_FLAG_TO_DCPORTAL;
3245         for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
                /* create rx qman FQs for sessions */
3247                 ret = qman_create_fq(0, flags, &internals->inq[i]);
3248                 if (unlikely(ret != 0)) {
3249                         DPAA_SEC_ERR("sec qman_create_fq failed");
3250                         goto init_error;
3251                 }
3252         }
3253
3254         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3255         return 0;
3256
3257 init_error:
        DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3259
3260         dpaa_sec_uninit(cryptodev);
3261         return -EFAULT;
3262 }
3263
3264 static int
3265 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3266                                 struct rte_dpaa_device *dpaa_dev)
3267 {
3268         struct rte_cryptodev *cryptodev;
3269         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3270
3271         int retval;
3272
3273         snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3274
3275         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3276         if (cryptodev == NULL)
3277                 return -ENOMEM;
3278
3279         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3280                 cryptodev->data->dev_private = rte_zmalloc_socket(
3281                                         "cryptodev private structure",
3282                                         sizeof(struct dpaa_sec_dev_private),
3283                                         RTE_CACHE_LINE_SIZE,
3284                                         rte_socket_id());
3285
                if (cryptodev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memory for private "
                                        "device data");
3289         }
3290
3291         dpaa_dev->crypto_dev = cryptodev;
3292         cryptodev->device = &dpaa_dev->device;
3293
3294         /* init user callbacks */
3295         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3296
3297         /* if sec device version is not configured */
3298         if (!rta_get_sec_era()) {
3299                 const struct device_node *caam_node;
3300
3301                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3302                         const uint32_t *prop = of_get_property(caam_node,
3303                                         "fsl,sec-era",
3304                                         NULL);
3305                         if (prop) {
3306                                 rta_set_sec_era(
3307                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3308                                 break;
3309                         }
3310                 }
3311         }
3312
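        /*
         * Illustrative device-tree fragment (hedged, the node name and
         * era value are examples only) carrying the property parsed above:
         *
         *      crypto@300000 {
         *              compatible = "fsl,sec-v4.0";
         *              fsl,sec-era = <8>;
         *      };
         */
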
3313         /* Invoke PMD device initialization function */
3314         retval = dpaa_sec_dev_init(cryptodev);
3315         if (retval == 0)
3316                 return 0;
3317
3318         /* In case of error, cleanup is done */
3319         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3320                 rte_free(cryptodev->data->dev_private);
3321
3322         rte_cryptodev_pmd_release_device(cryptodev);
3323
3324         return -ENXIO;
3325 }
3326
3327 static int
3328 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3329 {
3330         struct rte_cryptodev *cryptodev;
3331         int ret;
3332
3333         cryptodev = dpaa_dev->crypto_dev;
3334         if (cryptodev == NULL)
3335                 return -ENODEV;
3336
3337         ret = dpaa_sec_uninit(cryptodev);
3338         if (ret)
3339                 return ret;
3340
3341         return rte_cryptodev_pmd_destroy(cryptodev);
3342 }
3343
3344 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3345         .drv_type = FSL_DPAA_CRYPTO,
3346         .driver = {
3347                 .name = "DPAA SEC PMD"
3348         },
3349         .probe = cryptodev_dpaa_sec_probe,
3350         .remove = cryptodev_dpaa_sec_remove,
3351 };
3352
3353 static struct cryptodev_driver dpaa_sec_crypto_drv;
3354
3355 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3356 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3357                 cryptodev_driver_id);
3358
3359 RTE_INIT(dpaa_sec_init_log)
3360 {
3361         dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
3362         if (dpaa_logtype_sec >= 0)
3363                 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
3364 }
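
/*
 * Usage note (a minimal sketch, not from the source): the log level
 * registered above can be raised at run time via the EAL, e.g.:
 *
 *      ./your_app --log-level=pmd.crypto.dpaa:8 -- <application args>
 *
 * where 8 corresponds to RTE_LOG_DEBUG.
 */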