crypto/dpaa_sec: support IPv6 tunnel for protocol offload
drivers/crypto/dpaa_sec/dpaa_sec.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2019 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
27
28 #include <fsl_usd.h>
29 #include <fsl_qman.h>
30 #include <of.h>
31
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
36 #include <hw/desc/pdcp.h>
37
38 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec.h>
40 #include <dpaa_sec_log.h>
41 #include <dpaax_iova_table.h>
42
43 enum rta_sec_era rta_sec_era;
44
45 int dpaa_logtype_sec;
46
47 static uint8_t cryptodev_driver_id;
48
49 static __thread struct rte_crypto_op **dpaa_sec_ops;
50 static __thread int dpaa_sec_op_nb;
51
52 static int
53 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54
55 static inline void
56 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
57 {
58         if (!ctx->fd_status) {
59                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
60         } else {
61                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
62                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
63         }
64
65         /* report op status to sym->op and then free the ctx memory  */
66         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
67 }
68
69 static inline struct dpaa_sec_op_ctx *
70 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
71 {
72         struct dpaa_sec_op_ctx *ctx;
73         int retval;
74
75         retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
76         if (!ctx || retval) {
77                 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
78                 return NULL;
79         }
80         /*
81          * Clear SG memory. There are 16 SG entries of 16 bytes each.
82          * One call to dcbz_64() clears 64 bytes, so it is called 4 times
83          * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
84          * each packet, and memset() would be costlier than dcbz_64().
85          */
86         dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
87         dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
88         dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
89         dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
90
91         ctx->ctx_pool = ses->ctx_pool;
92         ctx->vtop_offset = (size_t) ctx
93                                 - rte_mempool_virt2iova(ctx);
94
95         return ctx;
96 }
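/*
 * Usage sketch for the two helpers above (illustrative only; the enqueue
 * step is elided and "ses"/"op" are assumed to come from the caller):
 *
 *   struct dpaa_sec_op_ctx *ctx = dpaa_sec_alloc_ctx(ses);
 *   if (ctx != NULL) {
 *           ctx->op = op;            // attach the crypto op to the job ctx
 *           // ... fill ctx->job.sg[] and enqueue the job to SEC ...
 *           // on completion, the dequeue path sets ctx->fd_status and calls:
 *           dpaa_sec_op_ending(ctx); // reports op->status, recycles ctx
 *   }
 */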
97
98 static inline rte_iova_t
99 dpaa_mem_vtop(void *vaddr)
100 {
101         const struct rte_memseg *ms;
102
103         ms = rte_mem_virt2memseg(vaddr, NULL);
104         if (ms) {
105                 dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
106                 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
107         }
108         return (size_t)NULL;
109 }
110
111 static inline void *
112 dpaa_mem_ptov(rte_iova_t paddr)
113 {
114         void *va;
115
116         va = (void *)dpaax_iova_table_get_va(paddr);
117         if (likely(va))
118                 return va;
119
120         return rte_mem_iova2virt(paddr);
121 }
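/*
 * Illustrative round trip between the two translation helpers above,
 * assuming "vaddr" lives in a DPDK memseg so both lookups can succeed:
 *
 *   rte_iova_t pa = dpaa_mem_vtop(vaddr);   // also primes the IOVA table
 *   void *va = dpaa_mem_ptov(pa);           // va == vaddr on success
 */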
122
123 static void
124 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
125                    struct qman_fq *fq,
126                    const struct qm_mr_entry *msg)
127 {
128         DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
129                         fq->fqid, msg->ern.rc, msg->ern.seqnum);
130 }
131
132 /* Initialize the queue with the CAAM channel as destination so that
133  * all the packets in this queue can be dispatched into the CAAM.
134  */
135 static int
136 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
137                  uint32_t fqid_out)
138 {
139         struct qm_mcc_initfq fq_opts;
140         uint32_t flags;
141         int ret = -1;
142
143         /* Clear FQ options */
144         memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
145
146         flags = QMAN_INITFQ_FLAG_SCHED;
147         fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
148                           QM_INITFQ_WE_CONTEXTB;
149
150         qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
151         fq_opts.fqd.context_b = fqid_out;
152         fq_opts.fqd.dest.channel = qm_channel_caam;
153         fq_opts.fqd.dest.wq = 0;
154
155         fq_in->cb.ern  = ern_sec_fq_handler;
156
157         DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
158
159         ret = qman_init_fq(fq_in, flags, &fq_opts);
160         if (unlikely(ret != 0))
161                 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
162
163         return ret;
164 }
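/*
 * Sketch of how a session's queue pair is typically wired together
 * (hypothetical caller; "qp" and "ses" follow this file's conventions):
 *
 *   ret = dpaa_sec_init_tx(&qp->outq);     // create the result queue first
 *   if (ret == 0)
 *           ret = dpaa_sec_init_rx(ses->inq, dpaa_mem_vtop(&ses->cdb),
 *                                  qp->outq.fqid);
 *
 * Context A of the input queue carries the shared-descriptor address and
 * context B names the output queue that will receive the CAAM results.
 */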
165
166 /* jobs are put into in_fq and the CAAM puts the crypto results into out_fq */
167 static enum qman_cb_dqrr_result
168 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
169                   struct qman_fq *fq __always_unused,
170                   const struct qm_dqrr_entry *dqrr)
171 {
172         const struct qm_fd *fd;
173         struct dpaa_sec_job *job;
174         struct dpaa_sec_op_ctx *ctx;
175
176         if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
177                 return qman_cb_dqrr_defer;
178
179         if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
180                 return qman_cb_dqrr_consume;
181
182         fd = &dqrr->fd;
183         /* sg is embedded in an op ctx,
184          * sg[0] is for output
185          * sg[1] is for input
186          */
187         job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
188
189         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
190         ctx->fd_status = fd->status;
191         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
192                 struct qm_sg_entry *sg_out;
193                 uint32_t len;
194
195                 sg_out = &job->sg[0];
196                 hw_sg_to_cpu(sg_out);
197                 len = sg_out->length;
198                 ctx->op->sym->m_src->pkt_len = len;
199                 ctx->op->sym->m_src->data_len = len;
200         }
201         dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
202         dpaa_sec_op_ending(ctx);
203
204         return qman_cb_dqrr_consume;
205 }
206
207 /* the CAAM result is put into this queue */
208 static int
209 dpaa_sec_init_tx(struct qman_fq *fq)
210 {
211         int ret;
212         struct qm_mcc_initfq opts;
213         uint32_t flags;
214
215         flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
216                 QMAN_FQ_FLAG_DYNAMIC_FQID;
217
218         ret = qman_create_fq(0, flags, fq);
219         if (unlikely(ret)) {
220                 DPAA_SEC_ERR("qman_create_fq failed");
221                 return ret;
222         }
223
224         memset(&opts, 0, sizeof(opts));
225         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
226                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
227
228         /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
229
230         fq->cb.dqrr = dqrr_out_fq_cb_rx;
231         fq->cb.ern  = ern_sec_fq_handler;
232
233         ret = qman_init_fq(fq, 0, &opts);
234         if (unlikely(ret)) {
235                 DPAA_SEC_ERR("unable to init caam source fq!");
236                 return ret;
237         }
238
239         return ret;
240 }
241
242 static inline int is_cipher_only(dpaa_sec_session *ses)
243 {
244         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
245                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
246 }
247
248 static inline int is_auth_only(dpaa_sec_session *ses)
249 {
250         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
251                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
252 }
253
254 static inline int is_aead(dpaa_sec_session *ses)
255 {
256         return ((ses->cipher_alg == 0) &&
257                 (ses->auth_alg == 0) &&
258                 (ses->aead_alg != 0));
259 }
260
261 static inline int is_auth_cipher(dpaa_sec_session *ses)
262 {
263         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
264                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
265                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
266 }
267
268 static inline int is_proto_ipsec(dpaa_sec_session *ses)
269 {
270         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
271 }
272
273 static inline int is_proto_pdcp(dpaa_sec_session *ses)
274 {
275         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
276 }
277
278 static inline int is_encode(dpaa_sec_session *ses)
279 {
280         return ses->dir == DIR_ENC;
281 }
282
283 static inline int is_decode(dpaa_sec_session *ses)
284 {
285         return ses->dir == DIR_DEC;
286 }
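/*
 * The predicates above classify a session into exactly one descriptor
 * flavour; dpaa_sec_prep_cdb() further below dispatches on them roughly as:
 *
 *   is_proto_ipsec()  -> dpaa_sec_prep_ipsec_cdb()
 *   is_proto_pdcp()   -> dpaa_sec_prep_pdcp_cdb()
 *   is_cipher_only()  -> cnstr_shdsc_blkcipher()
 *   is_auth_only()    -> cnstr_shdsc_hmac()
 *   is_aead()         -> cnstr_shdsc_gcm_encap()/cnstr_shdsc_gcm_decap()
 *   otherwise         -> cnstr_shdsc_authenc()
 */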
287
288 static inline void
289 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
290 {
291         switch (ses->auth_alg) {
292         case RTE_CRYPTO_AUTH_NULL:
293                 alginfo_a->algtype =
294                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
295                         OP_PCL_IPSEC_HMAC_NULL : 0;
296                 ses->digest_length = 0;
297                 break;
298         case RTE_CRYPTO_AUTH_MD5_HMAC:
299                 alginfo_a->algtype =
300                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
301                         OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
302                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
303                 break;
304         case RTE_CRYPTO_AUTH_SHA1_HMAC:
305                 alginfo_a->algtype =
306                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
307                         OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
308                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
309                 break;
310         case RTE_CRYPTO_AUTH_SHA224_HMAC:
311                 alginfo_a->algtype =
312                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
313                         OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
314                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
315                 break;
316         case RTE_CRYPTO_AUTH_SHA256_HMAC:
317                 alginfo_a->algtype =
318                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
319                         OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
320                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
321                 break;
322         case RTE_CRYPTO_AUTH_SHA384_HMAC:
323                 alginfo_a->algtype =
324                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
325                         OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
326                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
327                 break;
328         case RTE_CRYPTO_AUTH_SHA512_HMAC:
329                 alginfo_a->algtype =
330                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
331                         OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
332                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
333                 break;
334         default:
335                 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
336         }
337 }
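/*
 * Example of the dual mapping above: an IPsec session using SHA1-HMAC gets
 * the protocol code point OP_PCL_IPSEC_HMAC_SHA1_96 (96-bit truncated ICV),
 * while a plain crypto session gets OP_ALG_ALGSEL_SHA1 with the
 * OP_ALG_AAI_HMAC mode bit instead.
 */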
338
339 static inline void
340 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
341 {
342         switch (ses->cipher_alg) {
343         case RTE_CRYPTO_CIPHER_NULL:
344                 alginfo_c->algtype =
345                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
346                         OP_PCL_IPSEC_NULL : 0;
347                 break;
348         case RTE_CRYPTO_CIPHER_AES_CBC:
349                 alginfo_c->algtype =
350                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
351                         OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
352                 alginfo_c->algmode = OP_ALG_AAI_CBC;
353                 break;
354         case RTE_CRYPTO_CIPHER_3DES_CBC:
355                 alginfo_c->algtype =
356                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
357                         OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
358                 alginfo_c->algmode = OP_ALG_AAI_CBC;
359                 break;
360         case RTE_CRYPTO_CIPHER_AES_CTR:
361                 alginfo_c->algtype =
362                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
363                         OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
364                 alginfo_c->algmode = OP_ALG_AAI_CTR;
365                 break;
366         default:
367                 DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
368         }
369 }
370
371 static inline void
372 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
373 {
374         switch (ses->aead_alg) {
375         case RTE_CRYPTO_AEAD_AES_GCM:
376                 alginfo->algtype = OP_ALG_ALGSEL_AES;
377                 alginfo->algmode = OP_ALG_AAI_GCM;
378                 break;
379         default:
380                 DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
381         }
382 }
383
384 static int
385 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
386 {
387         struct alginfo authdata = {0}, cipherdata = {0};
388         struct sec_cdb *cdb = &ses->cdb;
389         int32_t shared_desc_len = 0;
390         int err;
391 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
392         int swap = false;
393 #else
394         int swap = true;
395 #endif
396
397         switch (ses->cipher_alg) {
398         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
399                 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
400                 break;
401         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
402                 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
403                 break;
404         case RTE_CRYPTO_CIPHER_AES_CTR:
405                 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
406                 break;
407         case RTE_CRYPTO_CIPHER_NULL:
408                 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
409                 break;
410         default:
411                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
412                               ses->cipher_alg);
413                 return -1;
414         }
415
416         cipherdata.key = (size_t)ses->cipher_key.data;
417         cipherdata.keylen = ses->cipher_key.length;
418         cipherdata.key_enc_flags = 0;
419         cipherdata.key_type = RTA_DATA_IMM;
420
421         if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
422                 switch (ses->auth_alg) {
423                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
424                         authdata.algtype = PDCP_AUTH_TYPE_SNOW;
425                         break;
426                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
427                         authdata.algtype = PDCP_AUTH_TYPE_ZUC;
428                         break;
429                 case RTE_CRYPTO_AUTH_AES_CMAC:
430                         authdata.algtype = PDCP_AUTH_TYPE_AES;
431                         break;
432                 case RTE_CRYPTO_AUTH_NULL:
433                         authdata.algtype = PDCP_AUTH_TYPE_NULL;
434                         break;
435                 default:
436                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
437                                       ses->auth_alg);
438                         return -1;
439                 }
440
441                 authdata.key = (size_t)ses->auth_key.data;
442                 authdata.keylen = ses->auth_key.length;
443                 authdata.key_enc_flags = 0;
444                 authdata.key_type = RTA_DATA_IMM;
445
446                 cdb->sh_desc[0] = cipherdata.keylen;
447                 cdb->sh_desc[1] = authdata.keylen;
448                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
449                                        MIN_JOB_DESC_SIZE,
450                                        (unsigned int *)cdb->sh_desc,
451                                        &cdb->sh_desc[2], 2);
452
453                 if (err < 0) {
454                         DPAA_SEC_ERR("Crypto: Incorrect key lengths");
455                         return err;
456                 }
457                 if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
458                         cipherdata.key = (size_t)dpaa_mem_vtop(
459                                                 (void *)(size_t)cipherdata.key);
460                         cipherdata.key_type = RTA_DATA_PTR;
461                 }
462                 if (!(cdb->sh_desc[2] & (1<<1)) &&  authdata.keylen) {
463                         authdata.key = (size_t)dpaa_mem_vtop(
464                                                 (void *)(size_t)authdata.key);
465                         authdata.key_type = RTA_DATA_PTR;
466                 }
467
468                 cdb->sh_desc[0] = 0;
469                 cdb->sh_desc[1] = 0;
470                 cdb->sh_desc[2] = 0;
471
472                 if (ses->dir == DIR_ENC)
473                         shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
474                                         cdb->sh_desc, 1, swap,
475                                         ses->pdcp.hfn,
476                                         ses->pdcp.bearer,
477                                         ses->pdcp.pkt_dir,
478                                         ses->pdcp.hfn_threshold,
479                                         &cipherdata, &authdata,
480                                         0);
481                 else if (ses->dir == DIR_DEC)
482                         shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
483                                         cdb->sh_desc, 1, swap,
484                                         ses->pdcp.hfn,
485                                         ses->pdcp.bearer,
486                                         ses->pdcp.pkt_dir,
487                                         ses->pdcp.hfn_threshold,
488                                         &cipherdata, &authdata,
489                                         0);
490         } else {
491                 cdb->sh_desc[0] = cipherdata.keylen;
492                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
493                                        MIN_JOB_DESC_SIZE,
494                                        (unsigned int *)cdb->sh_desc,
495                                        &cdb->sh_desc[2], 1);
496
497                 if (err < 0) {
498                         DPAA_SEC_ERR("Crypto: Incorrect key lengths");
499                         return err;
500                 }
501                 if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
502                         cipherdata.key = (size_t)dpaa_mem_vtop(
503                                                 (void *)(size_t)cipherdata.key);
504                         cipherdata.key_type = RTA_DATA_PTR;
505                 }
506                 cdb->sh_desc[0] = 0;
507                 cdb->sh_desc[1] = 0;
508                 cdb->sh_desc[2] = 0;
509
510                 if (ses->dir == DIR_ENC)
511                         shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
512                                         cdb->sh_desc, 1, swap,
513                                         ses->pdcp.sn_size,
514                                         ses->pdcp.hfn,
515                                         ses->pdcp.bearer,
516                                         ses->pdcp.pkt_dir,
517                                         ses->pdcp.hfn_threshold,
518                                         &cipherdata, 0);
519                 else if (ses->dir == DIR_DEC)
520                         shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
521                                         cdb->sh_desc, 1, swap,
522                                         ses->pdcp.sn_size,
523                                         ses->pdcp.hfn,
524                                         ses->pdcp.bearer,
525                                         ses->pdcp.pkt_dir,
526                                         ses->pdcp.hfn_threshold,
527                                         &cipherdata, 0);
528         }
529
530         return shared_desc_len;
531 }
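/*
 * Key-placement pattern used above (and repeated in the IPsec and authenc
 * paths below): the key lengths are staged in sh_desc[0]/sh_desc[1], and
 * rta_inline_query() answers in sh_desc[2] with one bit per key
 * (bit 0 = cipher, bit 1 = auth; a set bit means the key fits inline):
 *
 *   if (!(cdb->sh_desc[2] & (1 << 0))) {        // must pass key by pointer
 *           cipherdata.key = (size_t)dpaa_mem_vtop(
 *                                   (void *)(size_t)cipherdata.key);
 *           cipherdata.key_type = RTA_DATA_PTR;
 *   }
 */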
532
533 /* prepare ipsec proto command block of the session */
534 static int
535 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
536 {
537         struct alginfo cipherdata = {0}, authdata = {0};
538         struct sec_cdb *cdb = &ses->cdb;
539         int32_t shared_desc_len = 0;
540         int err;
541 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
542         int swap = false;
543 #else
544         int swap = true;
545 #endif
546
547         caam_cipher_alg(ses, &cipherdata);
548         if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
549                 DPAA_SEC_ERR("not supported cipher alg");
550                 return -ENOTSUP;
551         }
552
553         cipherdata.key = (size_t)ses->cipher_key.data;
554         cipherdata.keylen = ses->cipher_key.length;
555         cipherdata.key_enc_flags = 0;
556         cipherdata.key_type = RTA_DATA_IMM;
557
558         caam_auth_alg(ses, &authdata);
559         if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
560                 DPAA_SEC_ERR("not supported auth alg");
561                 return -ENOTSUP;
562         }
563
564         authdata.key = (size_t)ses->auth_key.data;
565         authdata.keylen = ses->auth_key.length;
566         authdata.key_enc_flags = 0;
567         authdata.key_type = RTA_DATA_IMM;
568
569         cdb->sh_desc[0] = cipherdata.keylen;
570         cdb->sh_desc[1] = authdata.keylen;
571         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
572                                MIN_JOB_DESC_SIZE,
573                                (unsigned int *)cdb->sh_desc,
574                                &cdb->sh_desc[2], 2);
575
576         if (err < 0) {
577                 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
578                 return err;
579         }
580         if (cdb->sh_desc[2] & 1)
581                 cipherdata.key_type = RTA_DATA_IMM;
582         else {
583                 cipherdata.key = (size_t)dpaa_mem_vtop(
584                                         (void *)(size_t)cipherdata.key);
585                 cipherdata.key_type = RTA_DATA_PTR;
586         }
587         if (cdb->sh_desc[2] & (1<<1))
588                 authdata.key_type = RTA_DATA_IMM;
589         else {
590                 authdata.key = (size_t)dpaa_mem_vtop(
591                                         (void *)(size_t)authdata.key);
592                 authdata.key_type = RTA_DATA_PTR;
593         }
594
595         cdb->sh_desc[0] = 0;
596         cdb->sh_desc[1] = 0;
597         cdb->sh_desc[2] = 0;
598         if (ses->dir == DIR_ENC) {
599                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
600                                 cdb->sh_desc,
601                                 true, swap, SHR_SERIAL,
602                                 &ses->encap_pdb,
603                                 (uint8_t *)&ses->ip4_hdr,
604                                 &cipherdata, &authdata);
605         } else if (ses->dir == DIR_DEC) {
606                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
607                                 cdb->sh_desc,
608                                 true, swap, SHR_SERIAL,
609                                 &ses->decap_pdb,
610                                 &cipherdata, &authdata);
611         }
612         return shared_desc_len;
613 }
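/*
 * For encap, cnstr_shdsc_ipsec_new_encap() embeds the prebuilt outer IP
 * header (ses->ip4_hdr above) in the shared descriptor next to the PDB,
 * so the CAAM can prepend it to every tunnelled packet with no per-packet
 * work from software. A hypothetical session-setup fragment:
 *
 *   ses->encap_pdb.ip_hdr_len = sizeof(struct ip);  // outer header length
 *   ses->encap_pdb.spi = ipsec_xform->spi;          // SA identifier
 */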
614
615 /* prepare command block of the session */
616 static int
617 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
618 {
619         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
620         int32_t shared_desc_len = 0;
621         struct sec_cdb *cdb = &ses->cdb;
622         int err;
623 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
624         int swap = false;
625 #else
626         int swap = true;
627 #endif
628
629         memset(cdb, 0, sizeof(struct sec_cdb));
630
631         if (is_proto_ipsec(ses)) {
632                 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
633         } else if (is_proto_pdcp(ses)) {
634                 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
635         } else if (is_cipher_only(ses)) {
636                 caam_cipher_alg(ses, &alginfo_c);
637                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
638                         DPAA_SEC_ERR("not supported cipher alg");
639                         return -ENOTSUP;
640                 }
641
642                 alginfo_c.key = (size_t)ses->cipher_key.data;
643                 alginfo_c.keylen = ses->cipher_key.length;
644                 alginfo_c.key_enc_flags = 0;
645                 alginfo_c.key_type = RTA_DATA_IMM;
646
647                 shared_desc_len = cnstr_shdsc_blkcipher(
648                                                 cdb->sh_desc, true,
649                                                 swap, SHR_NEVER, &alginfo_c,
650                                                 NULL,
651                                                 ses->iv.length,
652                                                 ses->dir);
653         } else if (is_auth_only(ses)) {
654                 caam_auth_alg(ses, &alginfo_a);
655                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
656                         DPAA_SEC_ERR("not supported auth alg");
657                         return -ENOTSUP;
658                 }
659
660                 alginfo_a.key = (size_t)ses->auth_key.data;
661                 alginfo_a.keylen = ses->auth_key.length;
662                 alginfo_a.key_enc_flags = 0;
663                 alginfo_a.key_type = RTA_DATA_IMM;
664
665                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
666                                                    swap, SHR_NEVER, &alginfo_a,
667                                                    !ses->dir,
668                                                    ses->digest_length);
669         } else if (is_aead(ses)) {
670                 caam_aead_alg(ses, &alginfo);
671                 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
672                         DPAA_SEC_ERR("not supported aead alg");
673                         return -ENOTSUP;
674                 }
675                 alginfo.key = (size_t)ses->aead_key.data;
676                 alginfo.keylen = ses->aead_key.length;
677                 alginfo.key_enc_flags = 0;
678                 alginfo.key_type = RTA_DATA_IMM;
679
680                 if (ses->dir == DIR_ENC)
681                         shared_desc_len = cnstr_shdsc_gcm_encap(
682                                         cdb->sh_desc, true, swap, SHR_NEVER,
683                                         &alginfo,
684                                         ses->iv.length,
685                                         ses->digest_length);
686                 else
687                         shared_desc_len = cnstr_shdsc_gcm_decap(
688                                         cdb->sh_desc, true, swap, SHR_NEVER,
689                                         &alginfo,
690                                         ses->iv.length,
691                                         ses->digest_length);
692         } else {
693                 caam_cipher_alg(ses, &alginfo_c);
694                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
695                         DPAA_SEC_ERR("not supported cipher alg");
696                         return -ENOTSUP;
697                 }
698
699                 alginfo_c.key = (size_t)ses->cipher_key.data;
700                 alginfo_c.keylen = ses->cipher_key.length;
701                 alginfo_c.key_enc_flags = 0;
702                 alginfo_c.key_type = RTA_DATA_IMM;
703
704                 caam_auth_alg(ses, &alginfo_a);
705                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
706                         DPAA_SEC_ERR("not supported auth alg");
707                         return -ENOTSUP;
708                 }
709
710                 alginfo_a.key = (size_t)ses->auth_key.data;
711                 alginfo_a.keylen = ses->auth_key.length;
712                 alginfo_a.key_enc_flags = 0;
713                 alginfo_a.key_type = RTA_DATA_IMM;
714
715                 cdb->sh_desc[0] = alginfo_c.keylen;
716                 cdb->sh_desc[1] = alginfo_a.keylen;
717                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
718                                        MIN_JOB_DESC_SIZE,
719                                        (unsigned int *)cdb->sh_desc,
720                                        &cdb->sh_desc[2], 2);
721
722                 if (err < 0) {
723                         DPAA_SEC_ERR("Crypto: Incorrect key lengths");
724                         return err;
725                 }
726                 if (cdb->sh_desc[2] & 1)
727                         alginfo_c.key_type = RTA_DATA_IMM;
728                 else {
729                         alginfo_c.key = (size_t)dpaa_mem_vtop(
730                                                 (void *)(size_t)alginfo_c.key);
731                         alginfo_c.key_type = RTA_DATA_PTR;
732                 }
733                 if (cdb->sh_desc[2] & (1<<1))
734                         alginfo_a.key_type = RTA_DATA_IMM;
735                 else {
736                         alginfo_a.key = (size_t)dpaa_mem_vtop(
737                                                 (void *)(size_t)alginfo_a.key);
738                         alginfo_a.key_type = RTA_DATA_PTR;
739                 }
740                 cdb->sh_desc[0] = 0;
741                 cdb->sh_desc[1] = 0;
742                 cdb->sh_desc[2] = 0;
743                 /* auth_only_len is set to 0 here; it will be
744                  * overwritten in the FD for each packet.
745                  */
746                 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
747                                 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
748                                 ses->iv.length, 0,
749                                 ses->digest_length, ses->dir);
750         }
751
752         if (shared_desc_len < 0) {
753                 DPAA_SEC_ERR("error in preparing command block");
754                 return shared_desc_len;
755         }
756
757         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
758         cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
759         cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
760
761         return 0;
762 }
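/*
 * Note the final fix-up above: the length returned by the cnstr_shdsc_*()
 * constructor becomes the idlen field of the shared-descriptor header, and
 * both header words are then stored big-endian, the byte order in which
 * the SEC block reads them.
 */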
763
764 /* qp is lockless, should be accessed by only one thread */
765 static int
766 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
767 {
768         struct qman_fq *fq;
769         unsigned int pkts = 0;
770         int num_rx_bufs, ret;
771         struct qm_dqrr_entry *dq;
772         uint32_t vdqcr_flags = 0;
773
774         fq = &qp->outq;
775         /*
776          * For requests of fewer than four buffers, we set the QM_VDQCR_EXACT
777          * flag and provide the exact number of buffers.
778          * Without QM_VDQCR_EXACT, the portal may return up to two more
779          * buffers than requested, so we request two less in that case.
780          */
781         if (nb_ops < 4) {
782                 vdqcr_flags = QM_VDQCR_EXACT;
783                 num_rx_bufs = nb_ops;
784         } else {
785                 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
786                         (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
787         }
788         ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
789         if (ret)
790                 return 0;
791
792         do {
793                 const struct qm_fd *fd;
794                 struct dpaa_sec_job *job;
795                 struct dpaa_sec_op_ctx *ctx;
796                 struct rte_crypto_op *op;
797
798                 dq = qman_dequeue(fq);
799                 if (!dq)
800                         continue;
801
802                 fd = &dq->fd;
803                 /* sg is embedded in an op ctx,
804                  * sg[0] is for output
805          * sg[1] is for input
806                  */
807                 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
808
809                 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
810                 ctx->fd_status = fd->status;
811                 op = ctx->op;
812                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
813                         struct qm_sg_entry *sg_out;
814                         uint32_t len;
815
816                         sg_out = &job->sg[0];
817                         hw_sg_to_cpu(sg_out);
818                         len = sg_out->length;
819                         op->sym->m_src->pkt_len = len;
820                         op->sym->m_src->data_len = len;
821                 }
822                 if (!ctx->fd_status) {
823                         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
824                 } else {
825                         DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
826                         op->status = RTE_CRYPTO_OP_STATUS_ERROR;
827                 }
828                 ops[pkts++] = op;
829
830                 /* report op status to sym->op and then free the ctx memory */
831                 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
832
833                 qman_dqrr_consume(fq, dq);
834         } while (fq->flags & QMAN_FQ_STATE_VDQCR);
835
836         return pkts;
837 }
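/*
 * Illustrative poll of a queue pair (hypothetical wrapper; the qp is
 * lockless, so exactly one thread may do this at a time):
 *
 *   struct rte_crypto_op *ops[DPAA_SEC_BURST];
 *   int nb = dpaa_sec_deq(qp, ops, DPAA_SEC_BURST);
 *   // 0 <= nb <= DPAA_SEC_BURST completed ops, each with op->status set
 */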
838
839 static inline struct dpaa_sec_job *
840 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
841 {
842         struct rte_crypto_sym_op *sym = op->sym;
843         struct rte_mbuf *mbuf = sym->m_src;
844         struct dpaa_sec_job *cf;
845         struct dpaa_sec_op_ctx *ctx;
846         struct qm_sg_entry *sg, *out_sg, *in_sg;
847         phys_addr_t start_addr;
848         uint8_t *old_digest, extra_segs;
849
850         if (is_decode(ses))
851                 extra_segs = 3;
852         else
853                 extra_segs = 2;
854
855         if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
856                 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
857                                 MAX_SG_ENTRIES);
858                 return NULL;
859         }
860         ctx = dpaa_sec_alloc_ctx(ses);
861         if (!ctx)
862                 return NULL;
863
864         cf = &ctx->job;
865         ctx->op = op;
866         old_digest = ctx->digest;
867
868         /* output */
869         out_sg = &cf->sg[0];
870         qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
871         out_sg->length = ses->digest_length;
872         cpu_to_hw_sg(out_sg);
873
874         /* input */
875         in_sg = &cf->sg[1];
876         /* need to extend the input to a compound frame */
877         in_sg->extension = 1;
878         in_sg->final = 1;
879         in_sg->length = sym->auth.data.length;
880         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
881
882         /* 1st seg */
883         sg = in_sg + 1;
884         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
885         sg->length = mbuf->data_len - sym->auth.data.offset;
886         sg->offset = sym->auth.data.offset;
887
888         /* Successive segs */
889         mbuf = mbuf->next;
890         while (mbuf) {
891                 cpu_to_hw_sg(sg);
892                 sg++;
893                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
894                 sg->length = mbuf->data_len;
895                 mbuf = mbuf->next;
896         }
897
898         if (is_decode(ses)) {
899                 /* Digest verification case */
900                 cpu_to_hw_sg(sg);
901                 sg++;
902                 rte_memcpy(old_digest, sym->auth.digest.data,
903                                 ses->digest_length);
904                 start_addr = dpaa_mem_vtop(old_digest);
905                 qm_sg_entry_set64(sg, start_addr);
906                 sg->length = ses->digest_length;
907                 in_sg->length += ses->digest_length;
908         } else {
909                 /* Digest calculation case */
910                 sg->length -= ses->digest_length;
911         }
912         sg->final = 1;
913         cpu_to_hw_sg(sg);
914         cpu_to_hw_sg(in_sg);
915
916         return cf;
917 }
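/*
 * Compound frame produced above for the multi-segment auth-only case
 * (decode shown; encode simply trims the digest off the last data seg):
 *
 *   sg[0] (out) -> digest buffer, ses->digest_length bytes
 *   sg[1] (in)  -> sg[2..]: payload segments [+ saved digest to verify]
 */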
918
919 /**
920  * packet looks like:
921  *              |<----data_len------->|
922  *    |ip_header|ah_header|icv|payload|
923  *              ^
924  *              |
925  *         mbuf->pkt.data
926  */
927 static inline struct dpaa_sec_job *
928 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
929 {
930         struct rte_crypto_sym_op *sym = op->sym;
931         struct rte_mbuf *mbuf = sym->m_src;
932         struct dpaa_sec_job *cf;
933         struct dpaa_sec_op_ctx *ctx;
934         struct qm_sg_entry *sg;
935         rte_iova_t start_addr;
936         uint8_t *old_digest;
937
938         ctx = dpaa_sec_alloc_ctx(ses);
939         if (!ctx)
940                 return NULL;
941
942         cf = &ctx->job;
943         ctx->op = op;
944         old_digest = ctx->digest;
945
946         start_addr = rte_pktmbuf_iova(mbuf);
947         /* output */
948         sg = &cf->sg[0];
949         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
950         sg->length = ses->digest_length;
951         cpu_to_hw_sg(sg);
952
953         /* input */
954         sg = &cf->sg[1];
955         if (is_decode(ses)) {
956                 /* need to extend the input to a compound frame */
957                 sg->extension = 1;
958                 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
959                 sg->length = sym->auth.data.length + ses->digest_length;
960                 sg->final = 1;
961                 cpu_to_hw_sg(sg);
962
963                 sg = &cf->sg[2];
964                 /* hash result or digest, save digest first */
965                 rte_memcpy(old_digest, sym->auth.digest.data,
966                            ses->digest_length);
967                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
968                 sg->length = sym->auth.data.length;
969                 cpu_to_hw_sg(sg);
970
971                 /* let the hardware verify the digest */
972                 start_addr = dpaa_mem_vtop(old_digest);
973                 sg++;
974                 qm_sg_entry_set64(sg, start_addr);
975                 sg->length = ses->digest_length;
976                 sg->final = 1;
977                 cpu_to_hw_sg(sg);
978         } else {
979                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
980                 sg->length = sym->auth.data.length;
981                 sg->final = 1;
982                 cpu_to_hw_sg(sg);
983         }
984
985         return cf;
986 }
987
988 static inline struct dpaa_sec_job *
989 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
990 {
991         struct rte_crypto_sym_op *sym = op->sym;
992         struct dpaa_sec_job *cf;
993         struct dpaa_sec_op_ctx *ctx;
994         struct qm_sg_entry *sg, *out_sg, *in_sg;
995         struct rte_mbuf *mbuf;
996         uint8_t req_segs;
997         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
998                         ses->iv.offset);
999
1000         if (sym->m_dst) {
1001                 mbuf = sym->m_dst;
1002                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1003         } else {
1004                 mbuf = sym->m_src;
1005                 req_segs = mbuf->nb_segs * 2 + 3;
1006         }
1007
1008         if (req_segs > MAX_SG_ENTRIES) {
1009                 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1010                                 MAX_SG_ENTRIES);
1011                 return NULL;
1012         }
1013
1014         ctx = dpaa_sec_alloc_ctx(ses);
1015         if (!ctx)
1016                 return NULL;
1017
1018         cf = &ctx->job;
1019         ctx->op = op;
1020
1021         /* output */
1022         out_sg = &cf->sg[0];
1023         out_sg->extension = 1;
1024         out_sg->length = sym->cipher.data.length;
1025         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1026         cpu_to_hw_sg(out_sg);
1027
1028         /* 1st seg */
1029         sg = &cf->sg[2];
1030         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1031         sg->length = mbuf->data_len - sym->cipher.data.offset;
1032         sg->offset = sym->cipher.data.offset;
1033
1034         /* Successive segs */
1035         mbuf = mbuf->next;
1036         while (mbuf) {
1037                 cpu_to_hw_sg(sg);
1038                 sg++;
1039                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1040                 sg->length = mbuf->data_len;
1041                 mbuf = mbuf->next;
1042         }
1043         sg->final = 1;
1044         cpu_to_hw_sg(sg);
1045
1046         /* input */
1047         mbuf = sym->m_src;
1048         in_sg = &cf->sg[1];
1049         in_sg->extension = 1;
1050         in_sg->final = 1;
1051         in_sg->length = sym->cipher.data.length + ses->iv.length;
1052
1053         sg++;
1054         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1055         cpu_to_hw_sg(in_sg);
1056
1057         /* IV */
1058         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1059         sg->length = ses->iv.length;
1060         cpu_to_hw_sg(sg);
1061
1062         /* 1st seg */
1063         sg++;
1064         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1065         sg->length = mbuf->data_len - sym->cipher.data.offset;
1066         sg->offset = sym->cipher.data.offset;
1067
1068         /* Successive segs */
1069         mbuf = mbuf->next;
1070         while (mbuf) {
1071                 cpu_to_hw_sg(sg);
1072                 sg++;
1073                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1074                 sg->length = mbuf->data_len;
1075                 mbuf = mbuf->next;
1076         }
1077         sg->final = 1;
1078         cpu_to_hw_sg(sg);
1079
1080         return cf;
1081 }
1082
1083 static inline struct dpaa_sec_job *
1084 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1085 {
1086         struct rte_crypto_sym_op *sym = op->sym;
1087         struct dpaa_sec_job *cf;
1088         struct dpaa_sec_op_ctx *ctx;
1089         struct qm_sg_entry *sg;
1090         rte_iova_t src_start_addr, dst_start_addr;
1091         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1092                         ses->iv.offset);
1093
1094         ctx = dpaa_sec_alloc_ctx(ses);
1095         if (!ctx)
1096                 return NULL;
1097
1098         cf = &ctx->job;
1099         ctx->op = op;
1100
1101         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1102
1103         if (sym->m_dst)
1104                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1105         else
1106                 dst_start_addr = src_start_addr;
1107
1108         /* output */
1109         sg = &cf->sg[0];
1110         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1111         sg->length = sym->cipher.data.length + ses->iv.length;
1112         cpu_to_hw_sg(sg);
1113
1114         /* input */
1115         sg = &cf->sg[1];
1116
1117         /* need to extend the input to a compound frame */
1118         sg->extension = 1;
1119         sg->final = 1;
1120         sg->length = sym->cipher.data.length + ses->iv.length;
1121         qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1122         cpu_to_hw_sg(sg);
1123
1124         sg = &cf->sg[2];
1125         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1126         sg->length = ses->iv.length;
1127         cpu_to_hw_sg(sg);
1128
1129         sg++;
1130         qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
1131         sg->length = sym->cipher.data.length;
1132         sg->final = 1;
1133         cpu_to_hw_sg(sg);
1134
1135         return cf;
1136 }
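/*
 * Compound frame built above for the contiguous cipher-only case (note
 * that both frame lengths include ses->iv.length, mirroring the input):
 *
 *   sg[0] (out) -> dst buffer at cipher.data.offset
 *   sg[1] (in)  -> sg[2]: IV, sg[3]: src payload (final)
 */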
1137
1138 static inline struct dpaa_sec_job *
1139 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1140 {
1141         struct rte_crypto_sym_op *sym = op->sym;
1142         struct dpaa_sec_job *cf;
1143         struct dpaa_sec_op_ctx *ctx;
1144         struct qm_sg_entry *sg, *out_sg, *in_sg;
1145         struct rte_mbuf *mbuf;
1146         uint8_t req_segs;
1147         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1148                         ses->iv.offset);
1149
1150         if (sym->m_dst) {
1151                 mbuf = sym->m_dst;
1152                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1153         } else {
1154                 mbuf = sym->m_src;
1155                 req_segs = mbuf->nb_segs * 2 + 4;
1156         }
1157
1158         if (ses->auth_only_len)
1159                 req_segs++;
1160
1161         if (req_segs > MAX_SG_ENTRIES) {
1162                 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1163                                 MAX_SG_ENTRIES);
1164                 return NULL;
1165         }
1166
1167         ctx = dpaa_sec_alloc_ctx(ses);
1168         if (!ctx)
1169                 return NULL;
1170
1171         cf = &ctx->job;
1172         ctx->op = op;
1173
1174         rte_prefetch0(cf->sg);
1175
1176         /* output */
1177         out_sg = &cf->sg[0];
1178         out_sg->extension = 1;
1179         if (is_encode(ses))
1180                 out_sg->length = sym->aead.data.length + ses->auth_only_len
1181                                                 + ses->digest_length;
1182         else
1183                 out_sg->length = sym->aead.data.length + ses->auth_only_len;
1184
1185         /* output sg entries */
1186         sg = &cf->sg[2];
1187         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1188         cpu_to_hw_sg(out_sg);
1189
1190         /* 1st seg */
1191         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1192         sg->length = mbuf->data_len - sym->aead.data.offset +
1193                                         ses->auth_only_len;
1194         sg->offset = sym->aead.data.offset - ses->auth_only_len;
1195
1196         /* Successive segs */
1197         mbuf = mbuf->next;
1198         while (mbuf) {
1199                 cpu_to_hw_sg(sg);
1200                 sg++;
1201                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1202                 sg->length = mbuf->data_len;
1203                 mbuf = mbuf->next;
1204         }
1205         sg->length -= ses->digest_length;
1206
1207         if (is_encode(ses)) {
1208                 cpu_to_hw_sg(sg);
1209                 /* set auth output */
1210                 sg++;
1211                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1212                 sg->length = ses->digest_length;
1213         }
1214         sg->final = 1;
1215         cpu_to_hw_sg(sg);
1216
1217         /* input */
1218         mbuf = sym->m_src;
1219         in_sg = &cf->sg[1];
1220         in_sg->extension = 1;
1221         in_sg->final = 1;
1222         if (is_encode(ses))
1223                 in_sg->length = ses->iv.length + sym->aead.data.length
1224                                                         + ses->auth_only_len;
1225         else
1226                 in_sg->length = ses->iv.length + sym->aead.data.length
1227                                 + ses->auth_only_len + ses->digest_length;
1228
1229         /* input sg entries */
1230         sg++;
1231         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1232         cpu_to_hw_sg(in_sg);
1233
1234         /* 1st seg IV */
1235         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1236         sg->length = ses->iv.length;
1237         cpu_to_hw_sg(sg);
1238
1239         /* 2nd seg auth only */
1240         if (ses->auth_only_len) {
1241                 sg++;
1242                 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1243                 sg->length = ses->auth_only_len;
1244                 cpu_to_hw_sg(sg);
1245         }
1246
1247         /* 3rd seg */
1248         sg++;
1249         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1250         sg->length = mbuf->data_len - sym->aead.data.offset;
1251         sg->offset = sym->aead.data.offset;
1252
1253         /* Successive segs */
1254         mbuf = mbuf->next;
1255         while (mbuf) {
1256                 cpu_to_hw_sg(sg);
1257                 sg++;
1258                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1259                 sg->length = mbuf->data_len;
1260                 mbuf = mbuf->next;
1261         }
1262
1263         if (is_decode(ses)) {
1264                 cpu_to_hw_sg(sg);
1265                 sg++;
1266                 memcpy(ctx->digest, sym->aead.digest.data,
1267                         ses->digest_length);
1268                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1269                 sg->length = ses->digest_length;
1270         }
1271         sg->final = 1;
1272         cpu_to_hw_sg(sg);
1273
1274         return cf;
1275 }
1276
1277 static inline struct dpaa_sec_job *
1278 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1279 {
1280         struct rte_crypto_sym_op *sym = op->sym;
1281         struct dpaa_sec_job *cf;
1282         struct dpaa_sec_op_ctx *ctx;
1283         struct qm_sg_entry *sg;
1284         uint32_t length = 0;
1285         rte_iova_t src_start_addr, dst_start_addr;
1286         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1287                         ses->iv.offset);
1288
1289         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1290
1291         if (sym->m_dst)
1292                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1293         else
1294                 dst_start_addr = src_start_addr;
1295
1296         ctx = dpaa_sec_alloc_ctx(ses);
1297         if (!ctx)
1298                 return NULL;
1299
1300         cf = &ctx->job;
1301         ctx->op = op;
1302
1303         /* input */
1304         rte_prefetch0(cf->sg);
1305         sg = &cf->sg[2];
1306         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1307         if (is_encode(ses)) {
1308                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1309                 sg->length = ses->iv.length;
1310                 length += sg->length;
1311                 cpu_to_hw_sg(sg);
1312
1313                 sg++;
1314                 if (ses->auth_only_len) {
1315                         qm_sg_entry_set64(sg,
1316                                           dpaa_mem_vtop(sym->aead.aad.data));
1317                         sg->length = ses->auth_only_len;
1318                         length += sg->length;
1319                         cpu_to_hw_sg(sg);
1320                         sg++;
1321                 }
1322                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1323                 sg->length = sym->aead.data.length;
1324                 length += sg->length;
1325                 sg->final = 1;
1326                 cpu_to_hw_sg(sg);
1327         } else {
1328                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1329                 sg->length = ses->iv.length;
1330                 length += sg->length;
1331                 cpu_to_hw_sg(sg);
1332
1333                 sg++;
1334                 if (ses->auth_only_len) {
1335                         qm_sg_entry_set64(sg,
1336                                           dpaa_mem_vtop(sym->aead.aad.data));
1337                         sg->length = ses->auth_only_len;
1338                         length += sg->length;
1339                         cpu_to_hw_sg(sg);
1340                         sg++;
1341                 }
1342                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1343                 sg->length = sym->aead.data.length;
1344                 length += sg->length;
1345                 cpu_to_hw_sg(sg);
1346
1347                 memcpy(ctx->digest, sym->aead.digest.data,
1348                        ses->digest_length);
1349                 sg++;
1350
1351                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1352                 sg->length = ses->digest_length;
1353                 length += sg->length;
1354                 sg->final = 1;
1355                 cpu_to_hw_sg(sg);
1356         }
1357         /* input compound frame */
1358         cf->sg[1].length = length;
1359         cf->sg[1].extension = 1;
1360         cf->sg[1].final = 1;
1361         cpu_to_hw_sg(&cf->sg[1]);
1362
1363         /* output */
1364         sg++;
1365         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1366         qm_sg_entry_set64(sg,
1367                 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1368         sg->length = sym->aead.data.length + ses->auth_only_len;
1369         length = sg->length;
1370         if (is_encode(ses)) {
1371                 cpu_to_hw_sg(sg);
1372                 /* set auth output */
1373                 sg++;
1374                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1375                 sg->length = ses->digest_length;
1376                 length += sg->length;
1377         }
1378         sg->final = 1;
1379         cpu_to_hw_sg(sg);
1380
1381         /* output compound frame */
1382         cf->sg[0].length = length;
1383         cf->sg[0].extension = 1;
1384         cpu_to_hw_sg(&cf->sg[0]);
1385
1386         return cf;
1387 }
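/*
 * GCM compound frame assembled above (contiguous case):
 *
 *   input : IV | [AAD when auth_only_len] | payload | [digest on decode]
 *   output: payload window (shifted back by auth_only_len)
 *           | [digest on encode]
 */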
1388
1389 static inline struct dpaa_sec_job *
1390 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1391 {
1392         struct rte_crypto_sym_op *sym = op->sym;
1393         struct dpaa_sec_job *cf;
1394         struct dpaa_sec_op_ctx *ctx;
1395         struct qm_sg_entry *sg, *out_sg, *in_sg;
1396         struct rte_mbuf *mbuf;
1397         uint8_t req_segs;
1398         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1399                         ses->iv.offset);
1400
1401         if (sym->m_dst) {
1402                 mbuf = sym->m_dst;
1403                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1404         } else {
1405                 mbuf = sym->m_src;
1406                 req_segs = mbuf->nb_segs * 2 + 4;
1407         }
1408
1409         if (req_segs > MAX_SG_ENTRIES) {
1410                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1411                                 MAX_SG_ENTRIES);
1412                 return NULL;
1413         }
1414
1415         ctx = dpaa_sec_alloc_ctx(ses);
1416         if (!ctx)
1417                 return NULL;
1418
1419         cf = &ctx->job;
1420         ctx->op = op;
1421
1422         rte_prefetch0(cf->sg);
1423
1424         /* output */
1425         out_sg = &cf->sg[0];
1426         out_sg->extension = 1;
1427         if (is_encode(ses))
1428                 out_sg->length = sym->auth.data.length + ses->digest_length;
1429         else
1430                 out_sg->length = sym->auth.data.length;
1431
1432         /* output sg entries */
1433         sg = &cf->sg[2];
1434         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1435         cpu_to_hw_sg(out_sg);
1436
1437         /* 1st seg */
1438         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1439         sg->length = mbuf->data_len - sym->auth.data.offset;
1440         sg->offset = sym->auth.data.offset;
1441
1442         /* Successive segs */
1443         mbuf = mbuf->next;
1444         while (mbuf) {
1445                 cpu_to_hw_sg(sg);
1446                 sg++;
1447                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1448                 sg->length = mbuf->data_len;
1449                 mbuf = mbuf->next;
1450         }
1451         sg->length -= ses->digest_length;
1452
1453         if (is_encode(ses)) {
1454                 cpu_to_hw_sg(sg);
1455                 /* set auth output */
1456                 sg++;
1457                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1458                 sg->length = ses->digest_length;
1459         }
1460         sg->final = 1;
1461         cpu_to_hw_sg(sg);
1462
1463         /* input */
1464         mbuf = sym->m_src;
1465         in_sg = &cf->sg[1];
1466         in_sg->extension = 1;
1467         in_sg->final = 1;
1468         if (is_encode(ses))
1469                 in_sg->length = ses->iv.length + sym->auth.data.length;
1470         else
1471                 in_sg->length = ses->iv.length + sym->auth.data.length
1472                                                 + ses->digest_length;
1473
1474         /* input sg entries */
1475         sg++;
1476         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1477         cpu_to_hw_sg(in_sg);
1478
1479         /* 1st seg IV */
1480         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1481         sg->length = ses->iv.length;
1482         cpu_to_hw_sg(sg);
1483
1484         /* 2nd seg */
1485         sg++;
1486         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1487         sg->length = mbuf->data_len - sym->auth.data.offset;
1488         sg->offset = sym->auth.data.offset;
1489
1490         /* Successive segs */
1491         mbuf = mbuf->next;
1492         while (mbuf) {
1493                 cpu_to_hw_sg(sg);
1494                 sg++;
1495                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1496                 sg->length = mbuf->data_len;
1497                 mbuf = mbuf->next;
1498         }
1499
1500         sg->length -= ses->digest_length;
1501         if (is_decode(ses)) {
1502                 cpu_to_hw_sg(sg);
1503                 sg++;
1504                 memcpy(ctx->digest, sym->auth.digest.data,
1505                         ses->digest_length);
1506                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1507                 sg->length = ses->digest_length;
1508         }
1509         sg->final = 1;
1510         cpu_to_hw_sg(sg);
1511
1512         return cf;
1513 }
1514
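     /*
      * Contiguous-mbuf variant of build_cipher_auth_sg(). Input
      * (cf->sg[1]) chains IV + payload (+ received digest for decode);
      * output (cf->sg[0]) covers the ciphered region and, for encode,
      * the digest written to sym->auth.digest.phys_addr.
      */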
1515 static inline struct dpaa_sec_job *
1516 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1517 {
1518         struct rte_crypto_sym_op *sym = op->sym;
1519         struct dpaa_sec_job *cf;
1520         struct dpaa_sec_op_ctx *ctx;
1521         struct qm_sg_entry *sg;
1522         rte_iova_t src_start_addr, dst_start_addr;
1523         uint32_t length = 0;
1524         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1525                         ses->iv.offset);
1526
1527         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1528         if (sym->m_dst)
1529                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1530         else
1531                 dst_start_addr = src_start_addr;
1532
1533         ctx = dpaa_sec_alloc_ctx(ses);
1534         if (!ctx)
1535                 return NULL;
1536
1537         cf = &ctx->job;
1538         ctx->op = op;
1539
1540         /* input */
1541         rte_prefetch0(cf->sg);
1542         sg = &cf->sg[2];
1543         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1544         if (is_encode(ses)) {
1545                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1546                 sg->length = ses->iv.length;
1547                 length += sg->length;
1548                 cpu_to_hw_sg(sg);
1549
1550                 sg++;
1551                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1552                 sg->length = sym->auth.data.length;
1553                 length += sg->length;
1554                 sg->final = 1;
1555                 cpu_to_hw_sg(sg);
1556         } else {
1557                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1558                 sg->length = ses->iv.length;
1559                 length += sg->length;
1560                 cpu_to_hw_sg(sg);
1561
1562                 sg++;
1563
1564                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1565                 sg->length = sym->auth.data.length;
1566                 length += sg->length;
1567                 cpu_to_hw_sg(sg);
1568
1569                 memcpy(ctx->digest, sym->auth.digest.data,
1570                        ses->digest_length);
1571                 sg++;
1572
1573                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1574                 sg->length = ses->digest_length;
1575                 length += sg->length;
1576                 sg->final = 1;
1577                 cpu_to_hw_sg(sg);
1578         }
1579         /* input compound frame */
1580         cf->sg[1].length = length;
1581         cf->sg[1].extension = 1;
1582         cf->sg[1].final = 1;
1583         cpu_to_hw_sg(&cf->sg[1]);
1584
1585         /* output */
1586         sg++;
1587         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1588         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1589         sg->length = sym->cipher.data.length;
1590         length = sg->length;
1591         if (is_encode(ses)) {
1592                 cpu_to_hw_sg(sg);
1593                 /* set auth output */
1594                 sg++;
1595                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1596                 sg->length = ses->digest_length;
1597                 length += sg->length;
1598         }
1599         sg->final = 1;
1600         cpu_to_hw_sg(sg);
1601
1602         /* output compound frame */
1603         cf->sg[0].length = length;
1604         cf->sg[0].extension = 1;
1605         cpu_to_hw_sg(&cf->sg[0]);
1606
1607         return cf;
1608 }
1609
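     /*
      * Build a compound frame for protocol (IPsec/PDCP) offload. The whole
      * packet is handed to SEC: input covers the full m_src payload, and
      * the output entry spans the remaining buffer (buf_len - data_off)
      * since the hardware may grow the packet, e.g. by adding the tunnel
      * header and ESP trailer.
      */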
1610 static inline struct dpaa_sec_job *
1611 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1612 {
1613         struct rte_crypto_sym_op *sym = op->sym;
1614         struct dpaa_sec_job *cf;
1615         struct dpaa_sec_op_ctx *ctx;
1616         struct qm_sg_entry *sg;
1617         phys_addr_t src_start_addr, dst_start_addr;
1618
1619         ctx = dpaa_sec_alloc_ctx(ses);
1620         if (!ctx)
1621                 return NULL;
1622         cf = &ctx->job;
1623         ctx->op = op;
1624
1625         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1626
1627         if (sym->m_dst)
1628                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1629         else
1630                 dst_start_addr = src_start_addr;
1631
1632         /* input */
1633         sg = &cf->sg[1];
1634         qm_sg_entry_set64(sg, src_start_addr);
1635         sg->length = sym->m_src->pkt_len;
1636         sg->final = 1;
1637         cpu_to_hw_sg(sg);
1638
1639         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1640         /* output */
1641         sg = &cf->sg[0];
1642         qm_sg_entry_set64(sg, dst_start_addr);
1643         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1644         cpu_to_hw_sg(sg);
1645
1646         return cf;
1647 }
1648
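     /*
      * Convert up to nb_ops crypto ops into SEC compound frame descriptors
      * and enqueue them, in bursts of DPAA_SEC_BURST, on the per-lcore
      * session input queues. A session is lazily bound to this queue pair
      * (and the lcore portal) on its first op via dpaa_sec_attach_sess_q().
      */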
1649 static uint16_t
1650 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1651                        uint16_t nb_ops)
1652 {
1653         /* Function to transmit the frames to the given device and queue pair */
1654         uint32_t loop;
1655         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1656         uint16_t num_tx = 0;
             uint16_t nb_ops_in = nb_ops; /* requested op count, kept for stats */
1657         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1658         uint32_t frames_to_send;
1659         struct rte_crypto_op *op;
1660         struct dpaa_sec_job *cf;
1661         dpaa_sec_session *ses;
1662         uint32_t auth_only_len;
1663         struct qman_fq *inq[DPAA_SEC_BURST];
1664
1665         while (nb_ops) {
1666                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1667                                 DPAA_SEC_BURST : nb_ops;
1668                 for (loop = 0; loop < frames_to_send; loop++) {
1669                         op = *(ops++);
1670                         switch (op->sess_type) {
1671                         case RTE_CRYPTO_OP_WITH_SESSION:
1672                                 ses = (dpaa_sec_session *)
1673                                         get_sym_session_private_data(
1674                                                         op->sym->session,
1675                                                         cryptodev_driver_id);
1676                                 break;
1677                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1678                                 ses = (dpaa_sec_session *)
1679                                         get_sec_session_private_data(
1680                                                         op->sym->sec_session);
1681                                 break;
1682                         default:
1683                                 DPAA_SEC_DP_ERR(
1684                                         "sessionless crypto op not supported");
1685                                 frames_to_send = loop;
1686                                 nb_ops = loop;
1687                                 goto send_pkts;
1688                         }
1689                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1690                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1691                                         frames_to_send = loop;
1692                                         nb_ops = loop;
1693                                         goto send_pkts;
1694                                 }
1695                         } else if (unlikely(ses->qp[rte_lcore_id() %
1696                                                 MAX_DPAA_CORES] != qp)) {
1697                                 DPAA_SEC_DP_ERR("Old sess->qp = %p,"
1698                                         " new qp = %p\n",
1699                                         ses->qp[rte_lcore_id() %
1700                                         MAX_DPAA_CORES], qp);
1701                                 frames_to_send = loop;
1702                                 nb_ops = loop;
1703                                 goto send_pkts;
1704                         }
1705
1706                         auth_only_len = op->sym->auth.data.length -
1707                                                 op->sym->cipher.data.length;
1708                         if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1709                                 if (is_proto_ipsec(ses) ||
1710                                                 is_proto_pdcp(ses)) {
1711                                         cf = build_proto(op, ses);
1713                                 } else if (is_auth_only(ses)) {
1714                                         cf = build_auth_only(op, ses);
1715                                 } else if (is_cipher_only(ses)) {
1716                                         cf = build_cipher_only(op, ses);
1717                                 } else if (is_aead(ses)) {
1718                                         cf = build_cipher_auth_gcm(op, ses);
1719                                         auth_only_len = ses->auth_only_len;
1720                                 } else if (is_auth_cipher(ses)) {
1721                                         cf = build_cipher_auth(op, ses);
1722                                 } else {
1723                                         DPAA_SEC_DP_ERR("operation not supported");
1724                                         frames_to_send = loop;
1725                                         nb_ops = loop;
1726                                         goto send_pkts;
1727                                 }
1728                         } else {
1729                                 if (is_auth_only(ses)) {
1730                                         cf = build_auth_only_sg(op, ses);
1731                                 } else if (is_cipher_only(ses)) {
1732                                         cf = build_cipher_only_sg(op, ses);
1733                                 } else if (is_aead(ses)) {
1734                                         cf = build_cipher_auth_gcm_sg(op, ses);
1735                                         auth_only_len = ses->auth_only_len;
1736                                 } else if (is_auth_cipher(ses)) {
1737                                         cf = build_cipher_auth_sg(op, ses);
1738                                 } else {
1739                                         DPAA_SEC_DP_ERR("operation not supported");
1740                                         frames_to_send = loop;
1741                                         nb_ops = loop;
1742                                         goto send_pkts;
1743                                 }
1744                         }
1745                         if (unlikely(!cf)) {
1746                                 frames_to_send = loop;
1747                                 nb_ops = loop;
1748                                 goto send_pkts;
1749                         }
1750
1751                         fd = &fds[loop];
1752                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1753                         fd->opaque_addr = 0;
1754                         fd->cmd = 0;
1755                         qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1756                         fd->_format1 = qm_fd_compound;
1757                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1758                         /* auth_only_len is set to 0 in the descriptor and
1759                          * overwritten here in fd->cmd, which updates the
1760                          * DPOVRD register.
1761                          */
1762                         if (auth_only_len)
1763                                 fd->cmd = 0x80000000 | auth_only_len;
1764
1765                 }
1766 send_pkts:
1767                 loop = 0;
1768                 while (loop < frames_to_send) {
1769                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1770                                         frames_to_send - loop);
1771                 }
1772                 nb_ops -= frames_to_send;
1773                 num_tx += frames_to_send;
1774         }
1775
1776         dpaa_qp->tx_pkts += num_tx;
1777         dpaa_qp->tx_errs += nb_ops_in - num_tx;
1778
1779         return num_tx;
1780 }
1781
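     /* Dequeue up to nb_ops completed ops from the queue pair's outq. */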
1782 static uint16_t
1783 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1784                        uint16_t nb_ops)
1785 {
1786         uint16_t num_rx;
1787         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1788
1789         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1790
1791         dpaa_qp->rx_pkts += num_rx;
1792         dpaa_qp->rx_errs += nb_ops - num_rx;
1793
1794         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1795
1796         return num_rx;
1797 }
1798
1799 /** Release queue pair */
1800 static int
1801 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1802                             uint16_t qp_id)
1803 {
1804         struct dpaa_sec_dev_private *internals;
1805         struct dpaa_sec_qp *qp = NULL;
1806
1807         PMD_INIT_FUNC_TRACE();
1808
1809         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1810
1811         internals = dev->data->dev_private;
1812         if (qp_id >= internals->max_nb_queue_pairs) {
1813                 DPAA_SEC_ERR("qp_id %u out of range, max supported qpid is %d",
1814                              qp_id, internals->max_nb_queue_pairs);
1815                 return -EINVAL;
1816         }
1817
1818         qp = &internals->qps[qp_id];
1819         qp->internals = NULL;
1820         dev->data->queue_pairs[qp_id] = NULL;
1821
1822         return 0;
1823 }
1824
1825 /** Setup a queue pair */
1826 static int
1827 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1828                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1829                 __rte_unused int socket_id)
1830 {
1831         struct dpaa_sec_dev_private *internals;
1832         struct dpaa_sec_qp *qp = NULL;
1833
1834         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1835
1836         internals = dev->data->dev_private;
1837         if (qp_id >= internals->max_nb_queue_pairs) {
1838                 DPAA_SEC_ERR("qp_id %u out of range, max supported qpid is %d",
1839                              qp_id, internals->max_nb_queue_pairs);
1840                 return -EINVAL;
1841         }
1842
1843         qp = &internals->qps[qp_id];
1844         qp->internals = internals;
1845         dev->data->queue_pairs[qp_id] = qp;
1846
1847         return 0;
1848 }
1849
1850 /** Return the number of allocated queue pairs */
1851 static uint32_t
1852 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1853 {
1854         PMD_INIT_FUNC_TRACE();
1855
1856         return dev->data->nb_queue_pairs;
1857 }
1858
1859 /** Returns the size of session structure */
1860 static unsigned int
1861 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1862 {
1863         PMD_INIT_FUNC_TRACE();
1864
1865         return sizeof(dpaa_sec_session);
1866 }
1867
1868 static int
1869 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1870                      struct rte_crypto_sym_xform *xform,
1871                      dpaa_sec_session *session)
1872 {
1873         session->cipher_alg = xform->cipher.algo;
1874         session->iv.length = xform->cipher.iv.length;
1875         session->iv.offset = xform->cipher.iv.offset;
1876         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1877                                                RTE_CACHE_LINE_SIZE);
1878         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1879                 DPAA_SEC_ERR("No Memory for cipher key");
1880                 return -ENOMEM;
1881         }
1882         session->cipher_key.length = xform->cipher.key.length;
1883
1884         memcpy(session->cipher_key.data, xform->cipher.key.data,
1885                xform->cipher.key.length);
1886         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1887                         DIR_ENC : DIR_DEC;
1888
1889         return 0;
1890 }
1891
1892 static int
1893 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1894                    struct rte_crypto_sym_xform *xform,
1895                    dpaa_sec_session *session)
1896 {
1897         session->auth_alg = xform->auth.algo;
1898         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1899                                              RTE_CACHE_LINE_SIZE);
1900         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1901                 DPAA_SEC_ERR("No Memory for auth key");
1902                 return -ENOMEM;
1903         }
1904         session->auth_key.length = xform->auth.key.length;
1905         session->digest_length = xform->auth.digest_length;
1906
1907         memcpy(session->auth_key.data, xform->auth.key.data,
1908                xform->auth.key.length);
1909         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1910                         DIR_ENC : DIR_DEC;
1911
1912         return 0;
1913 }
1914
1915 static int
1916 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1917                    struct rte_crypto_sym_xform *xform,
1918                    dpaa_sec_session *session)
1919 {
1920         session->aead_alg = xform->aead.algo;
1921         session->iv.length = xform->aead.iv.length;
1922         session->iv.offset = xform->aead.iv.offset;
1923         session->auth_only_len = xform->aead.aad_length;
1924         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1925                                              RTE_CACHE_LINE_SIZE);
1926         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1927                 DPAA_SEC_ERR("No Memory for aead key");
1928                 return -ENOMEM;
1929         }
1930         session->aead_key.length = xform->aead.key.length;
1931         session->digest_length = xform->aead.digest_length;
1932
1933         memcpy(session->aead_key.data, xform->aead.key.data,
1934                xform->aead.key.length);
1935         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1936                         DIR_ENC : DIR_DEC;
1937
1938         return 0;
1939 }
1940
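     /*
      * Reserve one free session input queue from the device-wide pool of
      * max_nb_sessions * MAX_DPAA_CORES queues; callers serialise this
      * with internals->lock.
      */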
1941 static struct qman_fq *
1942 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1943 {
1944         unsigned int i;
1945
1946         for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
1947                 if (qi->inq_attach[i] == 0) {
1948                         qi->inq_attach[i] = 1;
1949                         return &qi->inq[i];
1950                 }
1951         }
1952         DPAA_SEC_WARN("All sessions in use, max_nb_sessions = %u",
                           qi->max_nb_sessions);
1953
1954         return NULL;
1955 }
1956
1957 static int
1958 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1959 {
1960         unsigned int i;
1961
1962         for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
1963                 if (&qi->inq[i] == fq) {
1964                         qman_retire_fq(fq, NULL);
1965                         qman_oos_fq(fq);
1966                         qi->inq_attach[i] = 0;
1967                         return 0;
1968                 }
1969         }
1970         return -1;
1971 }
1972
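     /*
      * Bind a session to a queue pair for the calling lcore: prepare the
      * shared descriptor (CDB), affine a QMan portal to this thread if
      * needed, and initialise the session input queue to deliver results
      * to qp->outq.
      */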
1973 static int
1974 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1975 {
1976         int ret;
1977
1978         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
1979         ret = dpaa_sec_prep_cdb(sess);
1980         if (ret) {
1981                 DPAA_SEC_ERR("Unable to prepare sec cdb");
1982                 return -1;
1983         }
1984         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1985                 ret = rte_dpaa_portal_init((void *)0);
1986                 if (ret) {
1987                         DPAA_SEC_ERR("Failure in affining portal");
1988                         return ret;
1989                 }
1990         }
1991         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
1992                                dpaa_mem_vtop(&sess->cdb),
1993                                qman_fq_fqid(&qp->outq));
1994         if (ret)
1995                 DPAA_SEC_ERR("Unable to init sec queue");
1996
1997         return ret;
1998 }
1999
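     /*
      * Parse a symmetric crypto xform chain into the private session.
      * Supported combinations: cipher only, auth only, cipher-then-auth
      * (encrypt direction), auth-then-cipher (decrypt direction) and a
      * single AEAD xform. A sketch of a caller-side chain for the encrypt
      * case (names and values are illustrative, not part of this driver):
      *
      *   struct rte_crypto_sym_xform auth_xf = {
      *           .type = RTE_CRYPTO_SYM_XFORM_AUTH,
      *           .auth = { .op = RTE_CRYPTO_AUTH_OP_GENERATE,
      *                     .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, ... },
      *           .next = NULL,
      *   };
      *   struct rte_crypto_sym_xform cipher_xf = {
      *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
      *           .cipher = { .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
      *                       .algo = RTE_CRYPTO_CIPHER_AES_CBC, ... },
      *           .next = &auth_xf,
      *   };
      */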
2000 static int
2001 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2002                             struct rte_crypto_sym_xform *xform, void *sess)
2003 {
2004         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2005         dpaa_sec_session *session = sess;
2006         uint32_t i;
2007
2008         PMD_INIT_FUNC_TRACE();
2009
2010         if (unlikely(sess == NULL)) {
2011                 DPAA_SEC_ERR("invalid session struct");
2012                 return -EINVAL;
2013         }
2014         memset(session, 0, sizeof(dpaa_sec_session));
2015
2016         /* Default IV length = 0 */
2017         session->iv.length = 0;
2018
2019         /* Cipher Only */
2020         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2021                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2022                 dpaa_sec_cipher_init(dev, xform, session);
2023
2024         /* Authentication Only */
2025         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2026                    xform->next == NULL) {
2027                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2028                 dpaa_sec_auth_init(dev, xform, session);
2029
2030         /* Cipher then Authenticate */
2031         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2032                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2033                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2034                         dpaa_sec_cipher_init(dev, xform, session);
2035                         dpaa_sec_auth_init(dev, xform->next, session);
2036                 } else {
2037                         DPAA_SEC_ERR("Not supported: Cipher then Auth with decrypt");
2038                         return -EINVAL;
2039                 }
2040
2041         /* Authenticate then Cipher */
2042         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2043                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2044                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2045                         dpaa_sec_auth_init(dev, xform, session);
2046                         dpaa_sec_cipher_init(dev, xform->next, session);
2047                 } else {
2048                         DPAA_SEC_ERR("Not supported: Auth then Cipher with encrypt");
2049                         return -EINVAL;
2050                 }
2051
2052         /* AEAD operation for AES-GCM kind of Algorithms */
2053         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2054                    xform->next == NULL) {
2055                 dpaa_sec_aead_init(dev, xform, session);
2056
2057         } else {
2058                 DPAA_SEC_ERR("Invalid crypto type");
2059                 return -EINVAL;
2060         }
2061         session->ctx_pool = internals->ctx_pool;
2062         rte_spinlock_lock(&internals->lock);
2063         for (i = 0; i < MAX_DPAA_CORES; i++) {
2064                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2065                 if (session->inq[i] == NULL) {
2066                         DPAA_SEC_ERR("unable to attach sec queue");
2067                         rte_spinlock_unlock(&internals->lock);
2068                         goto err1;
2069                 }
2070         }
2071         rte_spinlock_unlock(&internals->lock);
2072
2073         return 0;
2074
2075 err1:
2076         rte_free(session->cipher_key.data);
2077         rte_free(session->auth_key.data);
2078         memset(session, 0, sizeof(dpaa_sec_session));
2079
2080         return -EINVAL;
2081 }
2082
2083 static int
2084 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2085                 struct rte_crypto_sym_xform *xform,
2086                 struct rte_cryptodev_sym_session *sess,
2087                 struct rte_mempool *mempool)
2088 {
2089         void *sess_private_data;
2090         int ret;
2091
2092         PMD_INIT_FUNC_TRACE();
2093
2094         if (rte_mempool_get(mempool, &sess_private_data)) {
2095                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2096                 return -ENOMEM;
2097         }
2098
2099         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2100         if (ret != 0) {
2101                 DPAA_SEC_ERR("failed to configure session parameters");
2102
2103                 /* Return session to mempool */
2104                 rte_mempool_put(mempool, sess_private_data);
2105                 return ret;
2106         }
2107
2108         set_sym_session_private_data(sess, dev->driver_id,
2109                         sess_private_data);
2110
2112         return 0;
2113 }
2114
2115 static inline void
2116 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2117 {
2118         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2119         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2120         uint8_t i;
2121
2122         for (i = 0; i < MAX_DPAA_CORES; i++) {
2123                 if (s->inq[i])
2124                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2125                 s->inq[i] = NULL;
2126                 s->qp[i] = NULL;
2127         }
2128         rte_free(s->cipher_key.data);
2129         rte_free(s->auth_key.data);
2130         memset(s, 0, sizeof(dpaa_sec_session));
2131         rte_mempool_put(sess_mp, (void *)s);
2132 }
2133
2134 /** Clear the memory of session so it doesn't leave key material behind */
2135 static void
2136 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2137                 struct rte_cryptodev_sym_session *sess)
2138 {
2139         PMD_INIT_FUNC_TRACE();
2140         uint8_t index = dev->driver_id;
2141         void *sess_priv = get_sym_session_private_data(sess, index);
2142         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2143
2144         if (sess_priv) {
2145                 free_session_memory(dev, s);
2146                 set_sym_session_private_data(sess, index, NULL);
2147         }
2148 }
2149
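     /*
      * Configure an IPsec protocol-offload session. For egress SAs the
      * outer tunnel header (IPv4 or IPv6) is pre-built here and stored
      * inline in the SEC encapsulation PDB, so the hardware prepends it
      * to every packet. A sketch of a caller-side configuration for an
      * IPv6 tunnel SA (field values are examples only):
      *
      *   struct rte_security_session_conf conf = {
      *           .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
      *           .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
      *           .ipsec = {
      *                   .spi = 0xdeadbeef,
      *                   .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
      *                   .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
      *                   .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
      *                   .tunnel = {
      *                           .type = RTE_SECURITY_IPSEC_TUNNEL_IPV6,
      *                           .ipv6 = { .hlimit = 64, .dscp = 0 },
      *                   },
      *           },
      *           .crypto_xform = &cipher_xf,
      *   };
      */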
2150 static int
2151 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2152                            struct rte_security_session_conf *conf,
2153                            void *sess)
2154 {
2155         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2156         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2157         struct rte_crypto_auth_xform *auth_xform = NULL;
2158         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2159         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2160         uint32_t i;
2161
2162         PMD_INIT_FUNC_TRACE();
2163
2164         memset(session, 0, sizeof(dpaa_sec_session));
2165         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2166                 cipher_xform = &conf->crypto_xform->cipher;
2167                 if (conf->crypto_xform->next)
2168                         auth_xform = &conf->crypto_xform->next->auth;
2169         } else {
2170                 auth_xform = &conf->crypto_xform->auth;
2171                 if (conf->crypto_xform->next)
2172                         cipher_xform = &conf->crypto_xform->next->cipher;
2173         }
2174         session->proto_alg = conf->protocol;
2175
2176         if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2177                 session->cipher_key.data = rte_zmalloc(NULL,
2178                                                        cipher_xform->key.length,
2179                                                        RTE_CACHE_LINE_SIZE);
2180                 if (session->cipher_key.data == NULL &&
2181                                 cipher_xform->key.length > 0) {
2182                         DPAA_SEC_ERR("No Memory for cipher key");
2183                         return -ENOMEM;
2184                 }
2185                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2186                                 cipher_xform->key.length);
2187                 session->cipher_key.length = cipher_xform->key.length;
2188
2189                 switch (cipher_xform->algo) {
2190                 case RTE_CRYPTO_CIPHER_AES_CBC:
2191                 case RTE_CRYPTO_CIPHER_3DES_CBC:
2192                 case RTE_CRYPTO_CIPHER_AES_CTR:
2193                         break;
2194                 default:
2195                         DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2196                                 cipher_xform->algo);
2197                         goto out;
2198                 }
2199                 session->cipher_alg = cipher_xform->algo;
2200         } else {
2201                 session->cipher_key.data = NULL;
2202                 session->cipher_key.length = 0;
2203                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2204         }
2205
2206         if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2207                 session->auth_key.data = rte_zmalloc(NULL,
2208                                                 auth_xform->key.length,
2209                                                 RTE_CACHE_LINE_SIZE);
2210                 if (session->auth_key.data == NULL &&
2211                                 auth_xform->key.length > 0) {
2212                         DPAA_SEC_ERR("No Memory for auth key");
2213                         rte_free(session->cipher_key.data);
2214                         return -ENOMEM;
2215                 }
2216                 memcpy(session->auth_key.data, auth_xform->key.data,
2217                                 auth_xform->key.length);
2218                 session->auth_key.length = auth_xform->key.length;
2219
2220                 switch (auth_xform->algo) {
2221                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2222                 case RTE_CRYPTO_AUTH_MD5_HMAC:
2223                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2224                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2225                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2226                 case RTE_CRYPTO_AUTH_AES_CMAC:
2227                         break;
2228                 default:
2229                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2230                                 auth_xform->algo);
2231                         goto out;
2232                 }
2233                 session->auth_alg = auth_xform->algo;
2234         } else {
2235                 session->auth_key.data = NULL;
2236                 session->auth_key.length = 0;
2237                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2238         }
2239
2240         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2241                 if (ipsec_xform->tunnel.type ==
2242                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2243                         memset(&session->encap_pdb, 0,
2244                                 sizeof(struct ipsec_encap_pdb) +
2245                                 sizeof(session->ip4_hdr));
2246                         session->ip4_hdr.ip_v = IPVERSION;
2247                         session->ip4_hdr.ip_hl = 5;
2248                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2249                                                 sizeof(session->ip4_hdr));
2250                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2251                         session->ip4_hdr.ip_id = 0;
2252                         session->ip4_hdr.ip_off = 0;
2253                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2254                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2255                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2256                                         IPPROTO_ESP : IPPROTO_AH;
2257                         session->ip4_hdr.ip_sum = 0;
2258                         session->ip4_hdr.ip_src =
2259                                         ipsec_xform->tunnel.ipv4.src_ip;
2260                         session->ip4_hdr.ip_dst =
2261                                         ipsec_xform->tunnel.ipv4.dst_ip;
2262                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2263                                                 (void *)&session->ip4_hdr,
2264                                                 sizeof(struct ip));
2265                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2266                 } else if (ipsec_xform->tunnel.type ==
2267                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2268                         memset(&session->encap_pdb, 0,
2269                                 sizeof(struct ipsec_encap_pdb) +
2270                                 sizeof(session->ip6_hdr));
2271                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2272                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2273                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2274                                         RTE_IPV6_HDR_TC_SHIFT) &
2275                                         RTE_IPV6_HDR_TC_MASK) |
2276                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2277                                         RTE_IPV6_HDR_FL_SHIFT) &
2278                                         RTE_IPV6_HDR_FL_MASK));
2279                         /* Payload length will be updated by HW */
2280                         session->ip6_hdr.payload_len = 0;
2281                         session->ip6_hdr.hop_limits =
2282                                         ipsec_xform->tunnel.ipv6.hlimit;
2283                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2284                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2285                                         IPPROTO_ESP : IPPROTO_AH;
2286                         memcpy(&session->ip6_hdr.src_addr,
2287                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2288                         memcpy(&session->ip6_hdr.dst_addr,
2289                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2290                         session->encap_pdb.ip_hdr_len =
2291                                                 sizeof(struct rte_ipv6_hdr);
2292                 }
2293                 session->encap_pdb.options =
2294                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2295                         PDBOPTS_ESP_OIHI_PDB_INL |
2296                         PDBOPTS_ESP_IVSRC |
2297                         PDBHMO_ESP_ENCAP_DTTL |
2298                         PDBHMO_ESP_SNR;
2299                 if (ipsec_xform->options.esn)
2300                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2301                 session->encap_pdb.spi = ipsec_xform->spi;
2302                 session->dir = DIR_ENC;
2303         } else if (ipsec_xform->direction ==
2304                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2305                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2306                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2307                         session->decap_pdb.options = sizeof(struct ip) << 16;
2308                 else
2309                         session->decap_pdb.options =
2310                                         sizeof(struct rte_ipv6_hdr) << 16;
2311                 if (ipsec_xform->options.esn)
2312                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2313                 session->dir = DIR_DEC;
2314         } else
2315                 goto out;
2316         session->ctx_pool = internals->ctx_pool;
2317         rte_spinlock_lock(&internals->lock);
2318         for (i = 0; i < MAX_DPAA_CORES; i++) {
2319                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2320                 if (session->inq[i] == NULL) {
2321                         DPAA_SEC_ERR("unable to attach sec queue");
2322                         rte_spinlock_unlock(&internals->lock);
2323                         goto out;
2324                 }
2325         }
2326         rte_spinlock_unlock(&internals->lock);
2327
2328         return 0;
2329 out:
2330         rte_free(session->auth_key.data);
2331         rte_free(session->cipher_key.data);
2332         memset(session, 0, sizeof(dpaa_sec_session));
2333         return -1;
2334 }
2335
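     /*
      * Configure a PDCP protocol-offload session. The cipher xform may be
      * NULL-cipher; an auth xform is meaningful only in control-plane
      * mode, which also mandates a 5-bit PDCP sequence number.
      */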
2336 static int
2337 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2338                           struct rte_security_session_conf *conf,
2339                           void *sess)
2340 {
2341         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2342         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2343         struct rte_crypto_auth_xform *auth_xform = NULL;
2344         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2345         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2346         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2347         uint32_t i;
2348
2349         PMD_INIT_FUNC_TRACE();
2350
2351         memset(session, 0, sizeof(dpaa_sec_session));
2352
2353         /* find xfrm types */
2354         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2355                 cipher_xform = &xform->cipher;
2356                 if (xform->next != NULL)
2357                         auth_xform = &xform->next->auth;
2358         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2359                 auth_xform = &xform->auth;
2360                 if (xform->next != NULL)
2361                         cipher_xform = &xform->next->cipher;
2362         } else {
2363                 DPAA_SEC_ERR("Invalid crypto type");
2364                 return -EINVAL;
2365         }
2366
2367         session->proto_alg = conf->protocol;
2368         if (cipher_xform) {
2369                 session->cipher_key.data = rte_zmalloc(NULL,
2370                                                cipher_xform->key.length,
2371                                                RTE_CACHE_LINE_SIZE);
2372                 if (session->cipher_key.data == NULL &&
2373                                 cipher_xform->key.length > 0) {
2374                         DPAA_SEC_ERR("No Memory for cipher key");
2375                         return -ENOMEM;
2376                 }
2377                 session->cipher_key.length = cipher_xform->key.length;
2378                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2379                         cipher_xform->key.length);
2380                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2381                                         DIR_ENC : DIR_DEC;
2382                 session->cipher_alg = cipher_xform->algo;
2383         } else {
2384                 session->cipher_key.data = NULL;
2385                 session->cipher_key.length = 0;
2386                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2387                 session->dir = DIR_ENC;
2388         }
2389
2390         /* Auth is only applicable for control mode operation. */
2391         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2392                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
2393                         DPAA_SEC_ERR(
2394                                 "PDCP Seq Num size should be 5 bits for cmode");
2395                         goto out;
2396                 }
2397                 if (auth_xform) {
2398                         session->auth_key.data = rte_zmalloc(NULL,
2399                                                         auth_xform->key.length,
2400                                                         RTE_CACHE_LINE_SIZE);
2401                         if (session->auth_key.data == NULL &&
2402                                         auth_xform->key.length > 0) {
2403                                 DPAA_SEC_ERR("No Memory for auth key");
2404                                 rte_free(session->cipher_key.data);
2405                                 return -ENOMEM;
2406                         }
2407                         session->auth_key.length = auth_xform->key.length;
2408                         memcpy(session->auth_key.data, auth_xform->key.data,
2409                                         auth_xform->key.length);
2410                         session->auth_alg = auth_xform->algo;
2411                 } else {
2412                         session->auth_key.data = NULL;
2413                         session->auth_key.length = 0;
2414                         session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2415                 }
2416         }
2417         session->pdcp.domain = pdcp_xform->domain;
2418         session->pdcp.bearer = pdcp_xform->bearer;
2419         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2420         session->pdcp.sn_size = pdcp_xform->sn_size;
2421 #ifdef ENABLE_HFN_OVERRIDE
2422         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
2423 #endif
2424         session->pdcp.hfn = pdcp_xform->hfn;
2425         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2426
2427         session->ctx_pool = dev_priv->ctx_pool;
2428         rte_spinlock_lock(&dev_priv->lock);
2429         for (i = 0; i < MAX_DPAA_CORES; i++) {
2430                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2431                 if (session->inq[i] == NULL) {
2432                         DPAA_SEC_ERR("unable to attach sec queue");
2433                         rte_spinlock_unlock(&dev_priv->lock);
2434                         goto out;
2435                 }
2436         }
2437         rte_spinlock_unlock(&dev_priv->lock);
2438         return 0;
2439 out:
2440         rte_free(session->auth_key.data);
2441         rte_free(session->cipher_key.data);
2442         memset(session, 0, sizeof(dpaa_sec_session));
2443         return -1;
2444 }
2445
2446 static int
2447 dpaa_sec_security_session_create(void *dev,
2448                                  struct rte_security_session_conf *conf,
2449                                  struct rte_security_session *sess,
2450                                  struct rte_mempool *mempool)
2451 {
2452         void *sess_private_data;
2453         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2454         int ret;
2455
2456         if (rte_mempool_get(mempool, &sess_private_data)) {
2457                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2458                 return -ENOMEM;
2459         }
2460
2461         switch (conf->protocol) {
2462         case RTE_SECURITY_PROTOCOL_IPSEC:
2463                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2464                                 sess_private_data);
2465                 break;
2466         case RTE_SECURITY_PROTOCOL_PDCP:
2467                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
2468                                 sess_private_data);
2469                 break;
2470         case RTE_SECURITY_PROTOCOL_MACSEC:
                     /* Return session to mempool before bailing out */
                     rte_mempool_put(mempool, sess_private_data);
2471                 return -ENOTSUP;
2472         default:
                     rte_mempool_put(mempool, sess_private_data);
2473                 return -EINVAL;
2474         }
2475         if (ret != 0) {
2476                 DPAA_SEC_ERR("failed to configure session parameters");
2477                 /* Return session to mempool */
2478                 rte_mempool_put(mempool, sess_private_data);
2479                 return ret;
2480         }
2481
2482         set_sec_session_private_data(sess, sess_private_data);
2483
2484         return ret;
2485 }
2486
2487 /** Clear the memory of session so it doesn't leave key material behind */
2488 static int
2489 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2490                 struct rte_security_session *sess)
2491 {
2492         PMD_INIT_FUNC_TRACE();
2493         void *sess_priv = get_sec_session_private_data(sess);
2494         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2495
2496         if (sess_priv) {
2497                 free_session_memory((struct rte_cryptodev *)dev, s);
2498                 set_sec_session_private_data(sess, NULL);
2499         }
2500         return 0;
2501 }
2502
2503 static int
2504 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2505                        struct rte_cryptodev_config *config __rte_unused)
2506 {
2508         char str[20];
2509         struct dpaa_sec_dev_private *internals;
2510
2511         PMD_INIT_FUNC_TRACE();
2512
2513         internals = dev->data->dev_private;
2514         snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2515         if (!internals->ctx_pool) {
2516                 internals->ctx_pool = rte_mempool_create((const char *)str,
2517                                                         CTX_POOL_NUM_BUFS,
2518                                                         CTX_POOL_BUF_SIZE,
2519                                                         CTX_POOL_CACHE_SIZE, 0,
2520                                                         NULL, NULL, NULL, NULL,
2521                                                         SOCKET_ID_ANY, 0);
2522                 if (!internals->ctx_pool) {
2523                         DPAA_SEC_ERR("%s create failed\n", str);
2524                         return -ENOMEM;
2525                 }
2526         } else
2527                 DPAA_SEC_INFO("mempool already created for dev_id : %d",
2528                                 dev->data->dev_id);
2529
2530         return 0;
2531 }
2532
2533 static int
2534 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2535 {
2536         PMD_INIT_FUNC_TRACE();
2537         return 0;
2538 }
2539
2540 static void
2541 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2542 {
2543         PMD_INIT_FUNC_TRACE();
2544 }
2545
2546 static int
2547 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2548 {
2549         struct dpaa_sec_dev_private *internals;
2550
2551         PMD_INIT_FUNC_TRACE();
2552
2553         if (dev == NULL)
2554                 return -ENODEV;
2555
2556         internals = dev->data->dev_private;
2557         rte_mempool_free(internals->ctx_pool);
2558         internals->ctx_pool = NULL;
2559
2560         return 0;
2561 }
2562
2563 static void
2564 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2565                        struct rte_cryptodev_info *info)
2566 {
2567         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2568
2569         PMD_INIT_FUNC_TRACE();
2570         if (info != NULL) {
2571                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2572                 info->feature_flags = dev->feature_flags;
2573                 info->capabilities = dpaa_sec_capabilities;
2574                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2575                 info->driver_id = cryptodev_driver_id;
2576         }
2577 }
2578
2579 static struct rte_cryptodev_ops crypto_ops = {
2580         .dev_configure        = dpaa_sec_dev_configure,
2581         .dev_start            = dpaa_sec_dev_start,
2582         .dev_stop             = dpaa_sec_dev_stop,
2583         .dev_close            = dpaa_sec_dev_close,
2584         .dev_infos_get        = dpaa_sec_dev_infos_get,
2585         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
2586         .queue_pair_release   = dpaa_sec_queue_pair_release,
2587         .queue_pair_count     = dpaa_sec_queue_pair_count,
2588         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
2589         .sym_session_configure    = dpaa_sec_sym_session_configure,
2590         .sym_session_clear        = dpaa_sec_sym_session_clear
2591 };
2592
2593 static const struct rte_security_capability *
2594 dpaa_sec_capabilities_get(void *device __rte_unused)
2595 {
2596         return dpaa_sec_security_cap;
2597 }
2598
2599 static const struct rte_security_ops dpaa_sec_security_ops = {
2600         .session_create = dpaa_sec_security_session_create,
2601         .session_update = NULL,
2602         .session_stats_get = NULL,
2603         .session_destroy = dpaa_sec_security_session_destroy,
2604         .set_pkt_metadata = NULL,
2605         .capabilities_get = dpaa_sec_capabilities_get
2606 };
2607
2608 static int
2609 dpaa_sec_uninit(struct rte_cryptodev *dev)
2610 {
2611         struct dpaa_sec_dev_private *internals;
2612
2613         if (dev == NULL)
2614                 return -ENODEV;
2615
2616         internals = dev->data->dev_private;
2617         rte_free(dev->security_ctx);
2618
2619         /* In case close has been called, internals->ctx_pool would be NULL */
2620         rte_mempool_free(internals->ctx_pool);
2621         rte_free(internals);
2622
2623         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2624                       dev->data->name, rte_socket_id());
2625
2626         return 0;
2627 }
2628
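     /*
      * One-time device initialisation: hook up dev ops and burst
      * functions, advertise feature flags, create the rte_security
      * context and, in the primary process only, initialise one TX FQ per
      * queue pair and MAX_DPAA_CORES RX FQs per session.
      */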
2629 static int
2630 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2631 {
2632         struct dpaa_sec_dev_private *internals;
2633         struct rte_security_ctx *security_instance;
2634         struct dpaa_sec_qp *qp;
2635         uint32_t i, flags;
2636         int ret;
2637
2638         PMD_INIT_FUNC_TRACE();
2639
2640         cryptodev->driver_id = cryptodev_driver_id;
2641         cryptodev->dev_ops = &crypto_ops;
2642
2643         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2644         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2645         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2646                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2647                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2648                         RTE_CRYPTODEV_FF_SECURITY |
2649                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2650                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2651                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2652                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2653                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2654
2655         internals = cryptodev->data->dev_private;
2656         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2657         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2658
2659         /*
2660          * For secondary processes, we don't initialise any further as the
2661          * primary process has already done this work.
2662          */
2664         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2665                 DPAA_SEC_WARN("Device already initialised by primary process");
2666                 return 0;
2667         }
2668
2669         /* Initialize security_ctx only for primary process*/
2670         security_instance = rte_malloc("rte_security_instances_ops",
2671                                 sizeof(struct rte_security_ctx), 0);
2672         if (security_instance == NULL)
2673                 return -ENOMEM;
2674         security_instance->device = (void *)cryptodev;
2675         security_instance->ops = &dpaa_sec_security_ops;
2676         security_instance->sess_cnt = 0;
2677         cryptodev->security_ctx = security_instance;
2678
2679         rte_spinlock_init(&internals->lock);
2680         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2681                 /* init qman fq for queue pair */
2682                 qp = &internals->qps[i];
2683                 ret = dpaa_sec_init_tx(&qp->outq);
2684                 if (ret) {
2685                         DPAA_SEC_ERR("config tx of queue pair %d failed", i);
2686                         goto init_error;
2687                 }
2688         }
2689
2690         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2691                 QMAN_FQ_FLAG_TO_DCPORTAL;
2692         for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
2693                 /* create rx qman fq for sessions*/
2694                 ret = qman_create_fq(0, flags, &internals->inq[i]);
2695                 if (unlikely(ret != 0)) {
2696                         DPAA_SEC_ERR("sec qman_create_fq failed");
2697                         goto init_error;
2698                 }
2699         }
2700
2701         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2702         return 0;
2703
2704 init_error:
2705         DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
2706
2707         dpaa_sec_uninit(cryptodev);
2708         return -EFAULT;
2709 }
2710
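     /*
      * Probe a DPAA SEC device: allocate a cryptodev, look up the SEC era
      * from the "fsl,sec-era" device-tree property if it has not been set
      * already, and run the PMD initialisation; clean up on failure.
      */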
2711 static int
2712 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
2713                                 struct rte_dpaa_device *dpaa_dev)
2714 {
2715         struct rte_cryptodev *cryptodev;
2716         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2717
2718         int retval;
2719
2720         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
2721                         dpaa_dev->id.dev_id);
2722
2723         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2724         if (cryptodev == NULL)
2725                 return -ENOMEM;
2726
2727         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2728                 cryptodev->data->dev_private = rte_zmalloc_socket(
2729                                         "cryptodev private structure",
2730                                         sizeof(struct dpaa_sec_dev_private),
2731                                         RTE_CACHE_LINE_SIZE,
2732                                         rte_socket_id());
2733
2734                 if (cryptodev->data->dev_private == NULL)
2735                         rte_panic("Cannot allocate memory for private "
2736                                         "device data");
2737         }
2738
2739         dpaa_dev->crypto_dev = cryptodev;
2740         cryptodev->device = &dpaa_dev->device;
2741
2742         /* init user callbacks */
2743         TAILQ_INIT(&(cryptodev->link_intr_cbs));
2744
2745         /* if sec device version is not configured */
2746         if (!rta_get_sec_era()) {
2747                 const struct device_node *caam_node;
2748
2749                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2750                         const uint32_t *prop = of_get_property(caam_node,
2751                                         "fsl,sec-era",
2752                                         NULL);
2753                         if (prop) {
2754                                 rta_set_sec_era(
2755                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2756                                 break;
2757                         }
2758                 }
2759         }
2760
2761         /* Invoke PMD device initialization function */
2762         retval = dpaa_sec_dev_init(cryptodev);
2763         if (retval == 0)
2764                 return 0;
2765
2766         /* In case of error, cleanup is done */
2767         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2768                 rte_free(cryptodev->data->dev_private);
2769
2770         rte_cryptodev_pmd_release_device(cryptodev);
2771
2772         return -ENXIO;
2773 }
2774
2775 static int
2776 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2777 {
2778         struct rte_cryptodev *cryptodev;
2779         int ret;
2780
2781         cryptodev = dpaa_dev->crypto_dev;
2782         if (cryptodev == NULL)
2783                 return -ENODEV;
2784
2785         ret = dpaa_sec_uninit(cryptodev);
2786         if (ret)
2787                 return ret;
2788
2789         return rte_cryptodev_pmd_destroy(cryptodev);
2790 }
2791
2792 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2793         .drv_type = FSL_DPAA_CRYPTO,
2794         .driver = {
2795                 .name = "DPAA SEC PMD"
2796         },
2797         .probe = cryptodev_dpaa_sec_probe,
2798         .remove = cryptodev_dpaa_sec_remove,
2799 };
2800
2801 static struct cryptodev_driver dpaa_sec_crypto_drv;
2802
2803 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2804 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2805                 cryptodev_driver_id);
2806
2807 RTE_INIT(dpaa_sec_init_log)
2808 {
2809         dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2810         if (dpaa_logtype_sec >= 0)
2811                 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2812 }