ea0b2054a0a1527546eacf2e1006415459695f33
[dpdk.git] / drivers / crypto / dpaa_sec / dpaa_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2019 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
27
28 #include <fsl_usd.h>
29 #include <fsl_qman.h>
30 #include <of.h>
31
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
36 #include <hw/desc/pdcp.h>
37
38 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec.h>
40 #include <dpaa_sec_log.h>
41 #include <dpaax_iova_table.h>
42
43 enum rta_sec_era rta_sec_era;
44
45 int dpaa_logtype_sec;
46
47 static uint8_t cryptodev_driver_id;
48
49 static __thread struct rte_crypto_op **dpaa_sec_ops;
50 static __thread int dpaa_sec_op_nb;
51
52 static int
53 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54
55 static inline void
56 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
57 {
58         if (!ctx->fd_status) {
59                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
60         } else {
61                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
62                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
63         }
64
65         /* report op status to sym->op and then free the ctx memory  */
66         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
67 }
68
69 static inline struct dpaa_sec_op_ctx *
70 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
71 {
72         struct dpaa_sec_op_ctx *ctx;
73         int i, retval;
74
75         retval = rte_mempool_get(
76                         ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
77                         (void **)(&ctx));
78         if (!ctx || retval) {
79                 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
80                 return NULL;
81         }
82         /*
83          * Clear SG memory. There are 16 SG entries of 16 Bytes each.
84          * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
85          * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
86          * each packet, memset is costlier than dcbz_64().
87          */
88         for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
89                 dcbz_64(&ctx->job.sg[i]);
90
91         ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
92         ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
93
94         return ctx;
95 }
96
97 static inline rte_iova_t
98 dpaa_mem_vtop(void *vaddr)
99 {
100         const struct rte_memseg *ms;
101
102         ms = rte_mem_virt2memseg(vaddr, NULL);
103         if (ms) {
104                 dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
105                 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
106         }
107         return (size_t)NULL;
108 }
109
110 static inline void *
111 dpaa_mem_ptov(rte_iova_t paddr)
112 {
113         void *va;
114
115         va = (void *)dpaax_iova_table_get_va(paddr);
116         if (likely(va))
117                 return va;
118
119         return rte_mem_iova2virt(paddr);
120 }
121
122 static void
123 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
124                    struct qman_fq *fq,
125                    const struct qm_mr_entry *msg)
126 {
127         DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
128                         fq->fqid, msg->ern.rc, msg->ern.seqnum);
129 }
130
131 /* initialize the queue with dest chan as caam chan so that
132  * all the packets in this queue could be dispatched into caam
133  */
134 static int
135 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
136                  uint32_t fqid_out)
137 {
138         struct qm_mcc_initfq fq_opts;
139         uint32_t flags;
140         int ret = -1;
141
142         /* Clear FQ options */
143         memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
144
145         flags = QMAN_INITFQ_FLAG_SCHED;
146         fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
147                           QM_INITFQ_WE_CONTEXTB;
148
149         qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
150         fq_opts.fqd.context_b = fqid_out;
151         fq_opts.fqd.dest.channel = qm_channel_caam;
152         fq_opts.fqd.dest.wq = 0;
153
154         fq_in->cb.ern  = ern_sec_fq_handler;
155
156         DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
157
158         ret = qman_init_fq(fq_in, flags, &fq_opts);
159         if (unlikely(ret != 0))
160                 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
161
162         return ret;
163 }
164
165 /* something is put into in_fq and caam put the crypto result into out_fq */
166 static enum qman_cb_dqrr_result
167 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
168                   struct qman_fq *fq __always_unused,
169                   const struct qm_dqrr_entry *dqrr)
170 {
171         const struct qm_fd *fd;
172         struct dpaa_sec_job *job;
173         struct dpaa_sec_op_ctx *ctx;
174
175         if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
176                 return qman_cb_dqrr_defer;
177
178         if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
179                 return qman_cb_dqrr_consume;
180
181         fd = &dqrr->fd;
182         /* sg is embedded in an op ctx,
183          * sg[0] is for output
184          * sg[1] for input
185          */
186         job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
187
188         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
189         ctx->fd_status = fd->status;
190         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
191                 struct qm_sg_entry *sg_out;
192                 uint32_t len;
193                 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
194                                 ctx->op->sym->m_src : ctx->op->sym->m_dst;
195
196                 sg_out = &job->sg[0];
197                 hw_sg_to_cpu(sg_out);
198                 len = sg_out->length;
199                 mbuf->pkt_len = len;
200                 while (mbuf->next != NULL) {
201                         len -= mbuf->data_len;
202                         mbuf = mbuf->next;
203                 }
204                 mbuf->data_len = len;
205         }
206         dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
207         dpaa_sec_op_ending(ctx);
208
209         return qman_cb_dqrr_consume;
210 }
211
212 /* caam result is put into this queue */
213 static int
214 dpaa_sec_init_tx(struct qman_fq *fq)
215 {
216         int ret;
217         struct qm_mcc_initfq opts;
218         uint32_t flags;
219
220         flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
221                 QMAN_FQ_FLAG_DYNAMIC_FQID;
222
223         ret = qman_create_fq(0, flags, fq);
224         if (unlikely(ret)) {
225                 DPAA_SEC_ERR("qman_create_fq failed");
226                 return ret;
227         }
228
229         memset(&opts, 0, sizeof(opts));
230         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
231                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
232
233         /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
234
235         fq->cb.dqrr = dqrr_out_fq_cb_rx;
236         fq->cb.ern  = ern_sec_fq_handler;
237
238         ret = qman_init_fq(fq, 0, &opts);
239         if (unlikely(ret)) {
240                 DPAA_SEC_ERR("unable to init caam source fq!");
241                 return ret;
242         }
243
244         return ret;
245 }
246
247 static inline int is_cipher_only(dpaa_sec_session *ses)
248 {
249         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
250                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
251 }
252
253 static inline int is_auth_only(dpaa_sec_session *ses)
254 {
255         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
256                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
257 }
258
259 static inline int is_aead(dpaa_sec_session *ses)
260 {
261         return ((ses->cipher_alg == 0) &&
262                 (ses->auth_alg == 0) &&
263                 (ses->aead_alg != 0));
264 }
265
266 static inline int is_auth_cipher(dpaa_sec_session *ses)
267 {
268         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
269                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
270                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
271                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
272 }
273
274 static inline int is_proto_ipsec(dpaa_sec_session *ses)
275 {
276         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
277 }
278
279 static inline int is_proto_pdcp(dpaa_sec_session *ses)
280 {
281         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
282 }
283
284 static inline int is_encode(dpaa_sec_session *ses)
285 {
286         return ses->dir == DIR_ENC;
287 }
288
289 static inline int is_decode(dpaa_sec_session *ses)
290 {
291         return ses->dir == DIR_DEC;
292 }
293
294 static inline void
295 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
296 {
297         switch (ses->auth_alg) {
298         case RTE_CRYPTO_AUTH_NULL:
299                 alginfo_a->algtype =
300                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
301                         OP_PCL_IPSEC_HMAC_NULL : 0;
302                 ses->digest_length = 0;
303                 break;
304         case RTE_CRYPTO_AUTH_MD5_HMAC:
305                 alginfo_a->algtype =
306                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
307                         OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
308                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
309                 break;
310         case RTE_CRYPTO_AUTH_SHA1_HMAC:
311                 alginfo_a->algtype =
312                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
313                         OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
314                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
315                 break;
316         case RTE_CRYPTO_AUTH_SHA224_HMAC:
317                 alginfo_a->algtype =
318                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
319                         OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
320                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
321                 break;
322         case RTE_CRYPTO_AUTH_SHA256_HMAC:
323                 alginfo_a->algtype =
324                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
325                         OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
326                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
327                 break;
328         case RTE_CRYPTO_AUTH_SHA384_HMAC:
329                 alginfo_a->algtype =
330                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
331                         OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
332                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
333                 break;
334         case RTE_CRYPTO_AUTH_SHA512_HMAC:
335                 alginfo_a->algtype =
336                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
337                         OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
338                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
339                 break;
340         default:
341                 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
342         }
343 }
344
345 static inline void
346 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
347 {
348         switch (ses->cipher_alg) {
349         case RTE_CRYPTO_CIPHER_NULL:
350                 alginfo_c->algtype =
351                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
352                         OP_PCL_IPSEC_NULL : 0;
353                 break;
354         case RTE_CRYPTO_CIPHER_AES_CBC:
355                 alginfo_c->algtype =
356                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
357                         OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
358                 alginfo_c->algmode = OP_ALG_AAI_CBC;
359                 break;
360         case RTE_CRYPTO_CIPHER_3DES_CBC:
361                 alginfo_c->algtype =
362                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
363                         OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
364                 alginfo_c->algmode = OP_ALG_AAI_CBC;
365                 break;
366         case RTE_CRYPTO_CIPHER_AES_CTR:
367                 alginfo_c->algtype =
368                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
369                         OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
370                 alginfo_c->algmode = OP_ALG_AAI_CTR;
371                 break;
372         default:
373                 DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
374         }
375 }
376
377 static inline void
378 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
379 {
380         switch (ses->aead_alg) {
381         case RTE_CRYPTO_AEAD_AES_GCM:
382                 alginfo->algtype = OP_ALG_ALGSEL_AES;
383                 alginfo->algmode = OP_ALG_AAI_GCM;
384                 break;
385         default:
386                 DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
387         }
388 }
389
390 static int
391 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
392 {
393         struct alginfo authdata = {0}, cipherdata = {0};
394         struct sec_cdb *cdb = &ses->cdb;
395         struct alginfo *p_authdata = NULL;
396         int32_t shared_desc_len = 0;
397         int err;
398 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
399         int swap = false;
400 #else
401         int swap = true;
402 #endif
403
404         switch (ses->cipher_alg) {
405         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
406                 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
407                 break;
408         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
409                 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
410                 break;
411         case RTE_CRYPTO_CIPHER_AES_CTR:
412                 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
413                 break;
414         case RTE_CRYPTO_CIPHER_NULL:
415                 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
416                 break;
417         default:
418                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
419                               ses->cipher_alg);
420                 return -1;
421         }
422
423         cipherdata.key = (size_t)ses->cipher_key.data;
424         cipherdata.keylen = ses->cipher_key.length;
425         cipherdata.key_enc_flags = 0;
426         cipherdata.key_type = RTA_DATA_IMM;
427
428         cdb->sh_desc[0] = cipherdata.keylen;
429         cdb->sh_desc[1] = 0;
430         cdb->sh_desc[2] = 0;
431
432         if (ses->auth_alg) {
433                 switch (ses->auth_alg) {
434                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
435                         authdata.algtype = PDCP_AUTH_TYPE_SNOW;
436                         break;
437                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
438                         authdata.algtype = PDCP_AUTH_TYPE_ZUC;
439                         break;
440                 case RTE_CRYPTO_AUTH_AES_CMAC:
441                         authdata.algtype = PDCP_AUTH_TYPE_AES;
442                         break;
443                 case RTE_CRYPTO_AUTH_NULL:
444                         authdata.algtype = PDCP_AUTH_TYPE_NULL;
445                         break;
446                 default:
447                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
448                                       ses->auth_alg);
449                         return -1;
450                 }
451
452                 authdata.key = (size_t)ses->auth_key.data;
453                 authdata.keylen = ses->auth_key.length;
454                 authdata.key_enc_flags = 0;
455                 authdata.key_type = RTA_DATA_IMM;
456
457                 p_authdata = &authdata;
458
459                 cdb->sh_desc[1] = authdata.keylen;
460         }
461
462         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
463                                MIN_JOB_DESC_SIZE,
464                                (unsigned int *)cdb->sh_desc,
465                                &cdb->sh_desc[2], 2);
466         if (err < 0) {
467                 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
468                 return err;
469         }
470
471         if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
472                 cipherdata.key =
473                         (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
474                 cipherdata.key_type = RTA_DATA_PTR;
475         }
476         if (!(cdb->sh_desc[2] & (1 << 1)) &&  authdata.keylen) {
477                 authdata.key =
478                         (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
479                 authdata.key_type = RTA_DATA_PTR;
480         }
481
482         cdb->sh_desc[0] = 0;
483         cdb->sh_desc[1] = 0;
484         cdb->sh_desc[2] = 0;
485
486         if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
487                 if (ses->dir == DIR_ENC)
488                         shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
489                                         cdb->sh_desc, 1, swap,
490                                         ses->pdcp.hfn,
491                                         ses->pdcp.sn_size,
492                                         ses->pdcp.bearer,
493                                         ses->pdcp.pkt_dir,
494                                         ses->pdcp.hfn_threshold,
495                                         &cipherdata, &authdata,
496                                         0);
497                 else if (ses->dir == DIR_DEC)
498                         shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
499                                         cdb->sh_desc, 1, swap,
500                                         ses->pdcp.hfn,
501                                         ses->pdcp.sn_size,
502                                         ses->pdcp.bearer,
503                                         ses->pdcp.pkt_dir,
504                                         ses->pdcp.hfn_threshold,
505                                         &cipherdata, &authdata,
506                                         0);
507         } else {
508                 if (ses->dir == DIR_ENC)
509                         shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
510                                         cdb->sh_desc, 1, swap,
511                                         ses->pdcp.sn_size,
512                                         ses->pdcp.hfn,
513                                         ses->pdcp.bearer,
514                                         ses->pdcp.pkt_dir,
515                                         ses->pdcp.hfn_threshold,
516                                         &cipherdata, p_authdata, 0);
517                 else if (ses->dir == DIR_DEC)
518                         shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
519                                         cdb->sh_desc, 1, swap,
520                                         ses->pdcp.sn_size,
521                                         ses->pdcp.hfn,
522                                         ses->pdcp.bearer,
523                                         ses->pdcp.pkt_dir,
524                                         ses->pdcp.hfn_threshold,
525                                         &cipherdata, p_authdata, 0);
526         }
527
528         return shared_desc_len;
529 }
530
531 /* prepare ipsec proto command block of the session */
532 static int
533 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
534 {
535         struct alginfo cipherdata = {0}, authdata = {0};
536         struct sec_cdb *cdb = &ses->cdb;
537         int32_t shared_desc_len = 0;
538         int err;
539 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
540         int swap = false;
541 #else
542         int swap = true;
543 #endif
544
545         caam_cipher_alg(ses, &cipherdata);
546         if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
547                 DPAA_SEC_ERR("not supported cipher alg");
548                 return -ENOTSUP;
549         }
550
551         cipherdata.key = (size_t)ses->cipher_key.data;
552         cipherdata.keylen = ses->cipher_key.length;
553         cipherdata.key_enc_flags = 0;
554         cipherdata.key_type = RTA_DATA_IMM;
555
556         caam_auth_alg(ses, &authdata);
557         if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
558                 DPAA_SEC_ERR("not supported auth alg");
559                 return -ENOTSUP;
560         }
561
562         authdata.key = (size_t)ses->auth_key.data;
563         authdata.keylen = ses->auth_key.length;
564         authdata.key_enc_flags = 0;
565         authdata.key_type = RTA_DATA_IMM;
566
567         cdb->sh_desc[0] = cipherdata.keylen;
568         cdb->sh_desc[1] = authdata.keylen;
569         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
570                                MIN_JOB_DESC_SIZE,
571                                (unsigned int *)cdb->sh_desc,
572                                &cdb->sh_desc[2], 2);
573
574         if (err < 0) {
575                 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
576                 return err;
577         }
578         if (cdb->sh_desc[2] & 1)
579                 cipherdata.key_type = RTA_DATA_IMM;
580         else {
581                 cipherdata.key = (size_t)dpaa_mem_vtop(
582                                         (void *)(size_t)cipherdata.key);
583                 cipherdata.key_type = RTA_DATA_PTR;
584         }
585         if (cdb->sh_desc[2] & (1<<1))
586                 authdata.key_type = RTA_DATA_IMM;
587         else {
588                 authdata.key = (size_t)dpaa_mem_vtop(
589                                         (void *)(size_t)authdata.key);
590                 authdata.key_type = RTA_DATA_PTR;
591         }
592
593         cdb->sh_desc[0] = 0;
594         cdb->sh_desc[1] = 0;
595         cdb->sh_desc[2] = 0;
596         if (ses->dir == DIR_ENC) {
597                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
598                                 cdb->sh_desc,
599                                 true, swap, SHR_SERIAL,
600                                 &ses->encap_pdb,
601                                 (uint8_t *)&ses->ip4_hdr,
602                                 &cipherdata, &authdata);
603         } else if (ses->dir == DIR_DEC) {
604                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
605                                 cdb->sh_desc,
606                                 true, swap, SHR_SERIAL,
607                                 &ses->decap_pdb,
608                                 &cipherdata, &authdata);
609         }
610         return shared_desc_len;
611 }
612
613 /* prepare command block of the session */
614 static int
615 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
616 {
617         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
618         int32_t shared_desc_len = 0;
619         struct sec_cdb *cdb = &ses->cdb;
620         int err;
621 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
622         int swap = false;
623 #else
624         int swap = true;
625 #endif
626
627         memset(cdb, 0, sizeof(struct sec_cdb));
628
629         if (is_proto_ipsec(ses)) {
630                 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
631         } else if (is_proto_pdcp(ses)) {
632                 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
633         } else if (is_cipher_only(ses)) {
634                 caam_cipher_alg(ses, &alginfo_c);
635                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
636                         DPAA_SEC_ERR("not supported cipher alg");
637                         return -ENOTSUP;
638                 }
639
640                 alginfo_c.key = (size_t)ses->cipher_key.data;
641                 alginfo_c.keylen = ses->cipher_key.length;
642                 alginfo_c.key_enc_flags = 0;
643                 alginfo_c.key_type = RTA_DATA_IMM;
644
645                 shared_desc_len = cnstr_shdsc_blkcipher(
646                                                 cdb->sh_desc, true,
647                                                 swap, SHR_NEVER, &alginfo_c,
648                                                 NULL,
649                                                 ses->iv.length,
650                                                 ses->dir);
651         } else if (is_auth_only(ses)) {
652                 caam_auth_alg(ses, &alginfo_a);
653                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
654                         DPAA_SEC_ERR("not supported auth alg");
655                         return -ENOTSUP;
656                 }
657
658                 alginfo_a.key = (size_t)ses->auth_key.data;
659                 alginfo_a.keylen = ses->auth_key.length;
660                 alginfo_a.key_enc_flags = 0;
661                 alginfo_a.key_type = RTA_DATA_IMM;
662
663                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
664                                                    swap, SHR_NEVER, &alginfo_a,
665                                                    !ses->dir,
666                                                    ses->digest_length);
667         } else if (is_aead(ses)) {
668                 caam_aead_alg(ses, &alginfo);
669                 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
670                         DPAA_SEC_ERR("not supported aead alg");
671                         return -ENOTSUP;
672                 }
673                 alginfo.key = (size_t)ses->aead_key.data;
674                 alginfo.keylen = ses->aead_key.length;
675                 alginfo.key_enc_flags = 0;
676                 alginfo.key_type = RTA_DATA_IMM;
677
678                 if (ses->dir == DIR_ENC)
679                         shared_desc_len = cnstr_shdsc_gcm_encap(
680                                         cdb->sh_desc, true, swap, SHR_NEVER,
681                                         &alginfo,
682                                         ses->iv.length,
683                                         ses->digest_length);
684                 else
685                         shared_desc_len = cnstr_shdsc_gcm_decap(
686                                         cdb->sh_desc, true, swap, SHR_NEVER,
687                                         &alginfo,
688                                         ses->iv.length,
689                                         ses->digest_length);
690         } else {
691                 caam_cipher_alg(ses, &alginfo_c);
692                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
693                         DPAA_SEC_ERR("not supported cipher alg");
694                         return -ENOTSUP;
695                 }
696
697                 alginfo_c.key = (size_t)ses->cipher_key.data;
698                 alginfo_c.keylen = ses->cipher_key.length;
699                 alginfo_c.key_enc_flags = 0;
700                 alginfo_c.key_type = RTA_DATA_IMM;
701
702                 caam_auth_alg(ses, &alginfo_a);
703                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
704                         DPAA_SEC_ERR("not supported auth alg");
705                         return -ENOTSUP;
706                 }
707
708                 alginfo_a.key = (size_t)ses->auth_key.data;
709                 alginfo_a.keylen = ses->auth_key.length;
710                 alginfo_a.key_enc_flags = 0;
711                 alginfo_a.key_type = RTA_DATA_IMM;
712
713                 cdb->sh_desc[0] = alginfo_c.keylen;
714                 cdb->sh_desc[1] = alginfo_a.keylen;
715                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
716                                        MIN_JOB_DESC_SIZE,
717                                        (unsigned int *)cdb->sh_desc,
718                                        &cdb->sh_desc[2], 2);
719
720                 if (err < 0) {
721                         DPAA_SEC_ERR("Crypto: Incorrect key lengths");
722                         return err;
723                 }
724                 if (cdb->sh_desc[2] & 1)
725                         alginfo_c.key_type = RTA_DATA_IMM;
726                 else {
727                         alginfo_c.key = (size_t)dpaa_mem_vtop(
728                                                 (void *)(size_t)alginfo_c.key);
729                         alginfo_c.key_type = RTA_DATA_PTR;
730                 }
731                 if (cdb->sh_desc[2] & (1<<1))
732                         alginfo_a.key_type = RTA_DATA_IMM;
733                 else {
734                         alginfo_a.key = (size_t)dpaa_mem_vtop(
735                                                 (void *)(size_t)alginfo_a.key);
736                         alginfo_a.key_type = RTA_DATA_PTR;
737                 }
738                 cdb->sh_desc[0] = 0;
739                 cdb->sh_desc[1] = 0;
740                 cdb->sh_desc[2] = 0;
741                 /* Auth_only_len is set as 0 here and it will be
742                  * overwritten in fd for each packet.
743                  */
744                 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
745                                 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
746                                 ses->iv.length, 0,
747                                 ses->digest_length, ses->dir);
748         }
749
750         if (shared_desc_len < 0) {
751                 DPAA_SEC_ERR("error in preparing command block");
752                 return shared_desc_len;
753         }
754
755         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
756         cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
757         cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
758
759         return 0;
760 }
761
762 /* qp is lockless, should be accessed by only one thread */
763 static int
764 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
765 {
766         struct qman_fq *fq;
767         unsigned int pkts = 0;
768         int num_rx_bufs, ret;
769         struct qm_dqrr_entry *dq;
770         uint32_t vdqcr_flags = 0;
771
772         fq = &qp->outq;
773         /*
774          * Until request for four buffers, we provide exact number of buffers.
775          * Otherwise we do not set the QM_VDQCR_EXACT flag.
776          * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
777          * requested, so we request two less in this case.
778          */
779         if (nb_ops < 4) {
780                 vdqcr_flags = QM_VDQCR_EXACT;
781                 num_rx_bufs = nb_ops;
782         } else {
783                 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
784                         (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
785         }
786         ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
787         if (ret)
788                 return 0;
789
790         do {
791                 const struct qm_fd *fd;
792                 struct dpaa_sec_job *job;
793                 struct dpaa_sec_op_ctx *ctx;
794                 struct rte_crypto_op *op;
795
796                 dq = qman_dequeue(fq);
797                 if (!dq)
798                         continue;
799
800                 fd = &dq->fd;
801                 /* sg is embedded in an op ctx,
802                  * sg[0] is for output
803                  * sg[1] for input
804                  */
805                 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
806
807                 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
808                 ctx->fd_status = fd->status;
809                 op = ctx->op;
810                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
811                         struct qm_sg_entry *sg_out;
812                         uint32_t len;
813                         struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
814                                                 op->sym->m_src : op->sym->m_dst;
815
816                         sg_out = &job->sg[0];
817                         hw_sg_to_cpu(sg_out);
818                         len = sg_out->length;
819                         mbuf->pkt_len = len;
820                         while (mbuf->next != NULL) {
821                                 len -= mbuf->data_len;
822                                 mbuf = mbuf->next;
823                         }
824                         mbuf->data_len = len;
825                 }
826                 if (!ctx->fd_status) {
827                         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
828                 } else {
829                         DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
830                         op->status = RTE_CRYPTO_OP_STATUS_ERROR;
831                 }
832                 ops[pkts++] = op;
833
834                 /* report op status to sym->op and then free the ctx memeory */
835                 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
836
837                 qman_dqrr_consume(fq, dq);
838         } while (fq->flags & QMAN_FQ_STATE_VDQCR);
839
840         return pkts;
841 }
842
843 static inline struct dpaa_sec_job *
844 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
845 {
846         struct rte_crypto_sym_op *sym = op->sym;
847         struct rte_mbuf *mbuf = sym->m_src;
848         struct dpaa_sec_job *cf;
849         struct dpaa_sec_op_ctx *ctx;
850         struct qm_sg_entry *sg, *out_sg, *in_sg;
851         phys_addr_t start_addr;
852         uint8_t *old_digest, extra_segs;
853
854         if (is_decode(ses))
855                 extra_segs = 3;
856         else
857                 extra_segs = 2;
858
859         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
860                 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
861                                 MAX_SG_ENTRIES);
862                 return NULL;
863         }
864         ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
865         if (!ctx)
866                 return NULL;
867
868         cf = &ctx->job;
869         ctx->op = op;
870         old_digest = ctx->digest;
871
872         /* output */
873         out_sg = &cf->sg[0];
874         qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
875         out_sg->length = ses->digest_length;
876         cpu_to_hw_sg(out_sg);
877
878         /* input */
879         in_sg = &cf->sg[1];
880         /* need to extend the input to a compound frame */
881         in_sg->extension = 1;
882         in_sg->final = 1;
883         in_sg->length = sym->auth.data.length;
884         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
885
886         /* 1st seg */
887         sg = in_sg + 1;
888         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
889         sg->length = mbuf->data_len - sym->auth.data.offset;
890         sg->offset = sym->auth.data.offset;
891
892         /* Successive segs */
893         mbuf = mbuf->next;
894         while (mbuf) {
895                 cpu_to_hw_sg(sg);
896                 sg++;
897                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
898                 sg->length = mbuf->data_len;
899                 mbuf = mbuf->next;
900         }
901
902         if (is_decode(ses)) {
903                 /* Digest verification case */
904                 cpu_to_hw_sg(sg);
905                 sg++;
906                 rte_memcpy(old_digest, sym->auth.digest.data,
907                                 ses->digest_length);
908                 start_addr = dpaa_mem_vtop(old_digest);
909                 qm_sg_entry_set64(sg, start_addr);
910                 sg->length = ses->digest_length;
911                 in_sg->length += ses->digest_length;
912         } else {
913                 /* Digest calculation case */
914                 sg->length -= ses->digest_length;
915         }
916         sg->final = 1;
917         cpu_to_hw_sg(sg);
918         cpu_to_hw_sg(in_sg);
919
920         return cf;
921 }
922
923 /**
924  * packet looks like:
925  *              |<----data_len------->|
926  *    |ip_header|ah_header|icv|payload|
927  *              ^
928  *              |
929  *         mbuf->pkt.data
930  */
931 static inline struct dpaa_sec_job *
932 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
933 {
934         struct rte_crypto_sym_op *sym = op->sym;
935         struct rte_mbuf *mbuf = sym->m_src;
936         struct dpaa_sec_job *cf;
937         struct dpaa_sec_op_ctx *ctx;
938         struct qm_sg_entry *sg;
939         rte_iova_t start_addr;
940         uint8_t *old_digest;
941
942         ctx = dpaa_sec_alloc_ctx(ses, 4);
943         if (!ctx)
944                 return NULL;
945
946         cf = &ctx->job;
947         ctx->op = op;
948         old_digest = ctx->digest;
949
950         start_addr = rte_pktmbuf_iova(mbuf);
951         /* output */
952         sg = &cf->sg[0];
953         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
954         sg->length = ses->digest_length;
955         cpu_to_hw_sg(sg);
956
957         /* input */
958         sg = &cf->sg[1];
959         if (is_decode(ses)) {
960                 /* need to extend the input to a compound frame */
961                 sg->extension = 1;
962                 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
963                 sg->length = sym->auth.data.length + ses->digest_length;
964                 sg->final = 1;
965                 cpu_to_hw_sg(sg);
966
967                 sg = &cf->sg[2];
968                 /* hash result or digest, save digest first */
969                 rte_memcpy(old_digest, sym->auth.digest.data,
970                            ses->digest_length);
971                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
972                 sg->length = sym->auth.data.length;
973                 cpu_to_hw_sg(sg);
974
975                 /* let's check digest by hw */
976                 start_addr = dpaa_mem_vtop(old_digest);
977                 sg++;
978                 qm_sg_entry_set64(sg, start_addr);
979                 sg->length = ses->digest_length;
980                 sg->final = 1;
981                 cpu_to_hw_sg(sg);
982         } else {
983                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
984                 sg->length = sym->auth.data.length;
985                 sg->final = 1;
986                 cpu_to_hw_sg(sg);
987         }
988
989         return cf;
990 }
991
992 static inline struct dpaa_sec_job *
993 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
994 {
995         struct rte_crypto_sym_op *sym = op->sym;
996         struct dpaa_sec_job *cf;
997         struct dpaa_sec_op_ctx *ctx;
998         struct qm_sg_entry *sg, *out_sg, *in_sg;
999         struct rte_mbuf *mbuf;
1000         uint8_t req_segs;
1001         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1002                         ses->iv.offset);
1003
1004         if (sym->m_dst) {
1005                 mbuf = sym->m_dst;
1006                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1007         } else {
1008                 mbuf = sym->m_src;
1009                 req_segs = mbuf->nb_segs * 2 + 3;
1010         }
1011
1012         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1013                 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1014                                 MAX_SG_ENTRIES);
1015                 return NULL;
1016         }
1017
1018         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1019         if (!ctx)
1020                 return NULL;
1021
1022         cf = &ctx->job;
1023         ctx->op = op;
1024
1025         /* output */
1026         out_sg = &cf->sg[0];
1027         out_sg->extension = 1;
1028         out_sg->length = sym->cipher.data.length;
1029         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1030         cpu_to_hw_sg(out_sg);
1031
1032         /* 1st seg */
1033         sg = &cf->sg[2];
1034         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1035         sg->length = mbuf->data_len - sym->cipher.data.offset;
1036         sg->offset = sym->cipher.data.offset;
1037
1038         /* Successive segs */
1039         mbuf = mbuf->next;
1040         while (mbuf) {
1041                 cpu_to_hw_sg(sg);
1042                 sg++;
1043                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1044                 sg->length = mbuf->data_len;
1045                 mbuf = mbuf->next;
1046         }
1047         sg->final = 1;
1048         cpu_to_hw_sg(sg);
1049
1050         /* input */
1051         mbuf = sym->m_src;
1052         in_sg = &cf->sg[1];
1053         in_sg->extension = 1;
1054         in_sg->final = 1;
1055         in_sg->length = sym->cipher.data.length + ses->iv.length;
1056
1057         sg++;
1058         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1059         cpu_to_hw_sg(in_sg);
1060
1061         /* IV */
1062         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1063         sg->length = ses->iv.length;
1064         cpu_to_hw_sg(sg);
1065
1066         /* 1st seg */
1067         sg++;
1068         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1069         sg->length = mbuf->data_len - sym->cipher.data.offset;
1070         sg->offset = sym->cipher.data.offset;
1071
1072         /* Successive segs */
1073         mbuf = mbuf->next;
1074         while (mbuf) {
1075                 cpu_to_hw_sg(sg);
1076                 sg++;
1077                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1078                 sg->length = mbuf->data_len;
1079                 mbuf = mbuf->next;
1080         }
1081         sg->final = 1;
1082         cpu_to_hw_sg(sg);
1083
1084         return cf;
1085 }
1086
1087 static inline struct dpaa_sec_job *
1088 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1089 {
1090         struct rte_crypto_sym_op *sym = op->sym;
1091         struct dpaa_sec_job *cf;
1092         struct dpaa_sec_op_ctx *ctx;
1093         struct qm_sg_entry *sg;
1094         rte_iova_t src_start_addr, dst_start_addr;
1095         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1096                         ses->iv.offset);
1097
1098         ctx = dpaa_sec_alloc_ctx(ses, 4);
1099         if (!ctx)
1100                 return NULL;
1101
1102         cf = &ctx->job;
1103         ctx->op = op;
1104
1105         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1106
1107         if (sym->m_dst)
1108                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1109         else
1110                 dst_start_addr = src_start_addr;
1111
1112         /* output */
1113         sg = &cf->sg[0];
1114         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1115         sg->length = sym->cipher.data.length + ses->iv.length;
1116         cpu_to_hw_sg(sg);
1117
1118         /* input */
1119         sg = &cf->sg[1];
1120
1121         /* need to extend the input to a compound frame */
1122         sg->extension = 1;
1123         sg->final = 1;
1124         sg->length = sym->cipher.data.length + ses->iv.length;
1125         qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1126         cpu_to_hw_sg(sg);
1127
1128         sg = &cf->sg[2];
1129         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1130         sg->length = ses->iv.length;
1131         cpu_to_hw_sg(sg);
1132
1133         sg++;
1134         qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
1135         sg->length = sym->cipher.data.length;
1136         sg->final = 1;
1137         cpu_to_hw_sg(sg);
1138
1139         return cf;
1140 }
1141
1142 static inline struct dpaa_sec_job *
1143 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1144 {
1145         struct rte_crypto_sym_op *sym = op->sym;
1146         struct dpaa_sec_job *cf;
1147         struct dpaa_sec_op_ctx *ctx;
1148         struct qm_sg_entry *sg, *out_sg, *in_sg;
1149         struct rte_mbuf *mbuf;
1150         uint8_t req_segs;
1151         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1152                         ses->iv.offset);
1153
1154         if (sym->m_dst) {
1155                 mbuf = sym->m_dst;
1156                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1157         } else {
1158                 mbuf = sym->m_src;
1159                 req_segs = mbuf->nb_segs * 2 + 4;
1160         }
1161
1162         if (ses->auth_only_len)
1163                 req_segs++;
1164
1165         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1166                 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1167                                 MAX_SG_ENTRIES);
1168                 return NULL;
1169         }
1170
1171         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1172         if (!ctx)
1173                 return NULL;
1174
1175         cf = &ctx->job;
1176         ctx->op = op;
1177
1178         rte_prefetch0(cf->sg);
1179
1180         /* output */
1181         out_sg = &cf->sg[0];
1182         out_sg->extension = 1;
1183         if (is_encode(ses))
1184                 out_sg->length = sym->aead.data.length + ses->auth_only_len
1185                                                 + ses->digest_length;
1186         else
1187                 out_sg->length = sym->aead.data.length + ses->auth_only_len;
1188
1189         /* output sg entries */
1190         sg = &cf->sg[2];
1191         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1192         cpu_to_hw_sg(out_sg);
1193
1194         /* 1st seg */
1195         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1196         sg->length = mbuf->data_len - sym->aead.data.offset +
1197                                         ses->auth_only_len;
1198         sg->offset = sym->aead.data.offset - ses->auth_only_len;
1199
1200         /* Successive segs */
1201         mbuf = mbuf->next;
1202         while (mbuf) {
1203                 cpu_to_hw_sg(sg);
1204                 sg++;
1205                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1206                 sg->length = mbuf->data_len;
1207                 mbuf = mbuf->next;
1208         }
1209         sg->length -= ses->digest_length;
1210
1211         if (is_encode(ses)) {
1212                 cpu_to_hw_sg(sg);
1213                 /* set auth output */
1214                 sg++;
1215                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1216                 sg->length = ses->digest_length;
1217         }
1218         sg->final = 1;
1219         cpu_to_hw_sg(sg);
1220
1221         /* input */
1222         mbuf = sym->m_src;
1223         in_sg = &cf->sg[1];
1224         in_sg->extension = 1;
1225         in_sg->final = 1;
1226         if (is_encode(ses))
1227                 in_sg->length = ses->iv.length + sym->aead.data.length
1228                                                         + ses->auth_only_len;
1229         else
1230                 in_sg->length = ses->iv.length + sym->aead.data.length
1231                                 + ses->auth_only_len + ses->digest_length;
1232
1233         /* input sg entries */
1234         sg++;
1235         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1236         cpu_to_hw_sg(in_sg);
1237
1238         /* 1st seg IV */
1239         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1240         sg->length = ses->iv.length;
1241         cpu_to_hw_sg(sg);
1242
1243         /* 2nd seg auth only */
1244         if (ses->auth_only_len) {
1245                 sg++;
1246                 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1247                 sg->length = ses->auth_only_len;
1248                 cpu_to_hw_sg(sg);
1249         }
1250
1251         /* 3rd seg */
1252         sg++;
1253         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1254         sg->length = mbuf->data_len - sym->aead.data.offset;
1255         sg->offset = sym->aead.data.offset;
1256
1257         /* Successive segs */
1258         mbuf = mbuf->next;
1259         while (mbuf) {
1260                 cpu_to_hw_sg(sg);
1261                 sg++;
1262                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1263                 sg->length = mbuf->data_len;
1264                 mbuf = mbuf->next;
1265         }
1266
1267         if (is_decode(ses)) {
1268                 cpu_to_hw_sg(sg);
1269                 sg++;
1270                 memcpy(ctx->digest, sym->aead.digest.data,
1271                         ses->digest_length);
1272                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1273                 sg->length = ses->digest_length;
1274         }
1275         sg->final = 1;
1276         cpu_to_hw_sg(sg);
1277
1278         return cf;
1279 }
1280
1281 static inline struct dpaa_sec_job *
1282 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1283 {
1284         struct rte_crypto_sym_op *sym = op->sym;
1285         struct dpaa_sec_job *cf;
1286         struct dpaa_sec_op_ctx *ctx;
1287         struct qm_sg_entry *sg;
1288         uint32_t length = 0;
1289         rte_iova_t src_start_addr, dst_start_addr;
1290         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1291                         ses->iv.offset);
1292
1293         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1294
1295         if (sym->m_dst)
1296                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1297         else
1298                 dst_start_addr = src_start_addr;
1299
1300         ctx = dpaa_sec_alloc_ctx(ses, 7);
1301         if (!ctx)
1302                 return NULL;
1303
1304         cf = &ctx->job;
1305         ctx->op = op;
1306
1307         /* input */
1308         rte_prefetch0(cf->sg);
1309         sg = &cf->sg[2];
1310         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1311         if (is_encode(ses)) {
1312                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1313                 sg->length = ses->iv.length;
1314                 length += sg->length;
1315                 cpu_to_hw_sg(sg);
1316
1317                 sg++;
1318                 if (ses->auth_only_len) {
1319                         qm_sg_entry_set64(sg,
1320                                           dpaa_mem_vtop(sym->aead.aad.data));
1321                         sg->length = ses->auth_only_len;
1322                         length += sg->length;
1323                         cpu_to_hw_sg(sg);
1324                         sg++;
1325                 }
1326                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1327                 sg->length = sym->aead.data.length;
1328                 length += sg->length;
1329                 sg->final = 1;
1330                 cpu_to_hw_sg(sg);
1331         } else {
1332                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1333                 sg->length = ses->iv.length;
1334                 length += sg->length;
1335                 cpu_to_hw_sg(sg);
1336
1337                 sg++;
1338                 if (ses->auth_only_len) {
1339                         qm_sg_entry_set64(sg,
1340                                           dpaa_mem_vtop(sym->aead.aad.data));
1341                         sg->length = ses->auth_only_len;
1342                         length += sg->length;
1343                         cpu_to_hw_sg(sg);
1344                         sg++;
1345                 }
1346                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1347                 sg->length = sym->aead.data.length;
1348                 length += sg->length;
1349                 cpu_to_hw_sg(sg);
1350
1351                 memcpy(ctx->digest, sym->aead.digest.data,
1352                        ses->digest_length);
1353                 sg++;
1354
1355                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1356                 sg->length = ses->digest_length;
1357                 length += sg->length;
1358                 sg->final = 1;
1359                 cpu_to_hw_sg(sg);
1360         }
1361         /* input compound frame */
1362         cf->sg[1].length = length;
1363         cf->sg[1].extension = 1;
1364         cf->sg[1].final = 1;
1365         cpu_to_hw_sg(&cf->sg[1]);
1366
1367         /* output */
1368         sg++;
1369         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1370         qm_sg_entry_set64(sg,
1371                 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1372         sg->length = sym->aead.data.length + ses->auth_only_len;
1373         length = sg->length;
1374         if (is_encode(ses)) {
1375                 cpu_to_hw_sg(sg);
1376                 /* set auth output */
1377                 sg++;
1378                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1379                 sg->length = ses->digest_length;
1380                 length += sg->length;
1381         }
1382         sg->final = 1;
1383         cpu_to_hw_sg(sg);
1384
1385         /* output compound frame */
1386         cf->sg[0].length = length;
1387         cf->sg[0].extension = 1;
1388         cpu_to_hw_sg(&cf->sg[0]);
1389
1390         return cf;
1391 }
1392
1393 static inline struct dpaa_sec_job *
1394 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1395 {
1396         struct rte_crypto_sym_op *sym = op->sym;
1397         struct dpaa_sec_job *cf;
1398         struct dpaa_sec_op_ctx *ctx;
1399         struct qm_sg_entry *sg, *out_sg, *in_sg;
1400         struct rte_mbuf *mbuf;
1401         uint8_t req_segs;
1402         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1403                         ses->iv.offset);
1404
1405         if (sym->m_dst) {
1406                 mbuf = sym->m_dst;
1407                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1408         } else {
1409                 mbuf = sym->m_src;
1410                 req_segs = mbuf->nb_segs * 2 + 4;
1411         }
1412
1413         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1414                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1415                                 MAX_SG_ENTRIES);
1416                 return NULL;
1417         }
1418
1419         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1420         if (!ctx)
1421                 return NULL;
1422
1423         cf = &ctx->job;
1424         ctx->op = op;
1425
1426         rte_prefetch0(cf->sg);
1427
1428         /* output */
1429         out_sg = &cf->sg[0];
1430         out_sg->extension = 1;
1431         if (is_encode(ses))
1432                 out_sg->length = sym->auth.data.length + ses->digest_length;
1433         else
1434                 out_sg->length = sym->auth.data.length;
1435
1436         /* output sg entries */
1437         sg = &cf->sg[2];
1438         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1439         cpu_to_hw_sg(out_sg);
1440
1441         /* 1st seg */
1442         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1443         sg->length = mbuf->data_len - sym->auth.data.offset;
1444         sg->offset = sym->auth.data.offset;
1445
1446         /* Successive segs */
1447         mbuf = mbuf->next;
1448         while (mbuf) {
1449                 cpu_to_hw_sg(sg);
1450                 sg++;
1451                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1452                 sg->length = mbuf->data_len;
1453                 mbuf = mbuf->next;
1454         }
1455         sg->length -= ses->digest_length;
1456
1457         if (is_encode(ses)) {
1458                 cpu_to_hw_sg(sg);
1459                 /* set auth output */
1460                 sg++;
1461                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1462                 sg->length = ses->digest_length;
1463         }
1464         sg->final = 1;
1465         cpu_to_hw_sg(sg);
1466
1467         /* input */
1468         mbuf = sym->m_src;
1469         in_sg = &cf->sg[1];
1470         in_sg->extension = 1;
1471         in_sg->final = 1;
1472         if (is_encode(ses))
1473                 in_sg->length = ses->iv.length + sym->auth.data.length;
1474         else
1475                 in_sg->length = ses->iv.length + sym->auth.data.length
1476                                                 + ses->digest_length;
1477
1478         /* input sg entries */
1479         sg++;
1480         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1481         cpu_to_hw_sg(in_sg);
1482
1483         /* 1st seg IV */
1484         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1485         sg->length = ses->iv.length;
1486         cpu_to_hw_sg(sg);
1487
1488         /* 2nd seg */
1489         sg++;
1490         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1491         sg->length = mbuf->data_len - sym->auth.data.offset;
1492         sg->offset = sym->auth.data.offset;
1493
1494         /* Successive segs */
1495         mbuf = mbuf->next;
1496         while (mbuf) {
1497                 cpu_to_hw_sg(sg);
1498                 sg++;
1499                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1500                 sg->length = mbuf->data_len;
1501                 mbuf = mbuf->next;
1502         }
1503
1504         sg->length -= ses->digest_length;
1505         if (is_decode(ses)) {
1506                 cpu_to_hw_sg(sg);
1507                 sg++;
1508                 memcpy(ctx->digest, sym->auth.digest.data,
1509                         ses->digest_length);
1510                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1511                 sg->length = ses->digest_length;
1512         }
1513         sg->final = 1;
1514         cpu_to_hw_sg(sg);
1515
1516         return cf;
1517 }
1518
1519 static inline struct dpaa_sec_job *
1520 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1521 {
1522         struct rte_crypto_sym_op *sym = op->sym;
1523         struct dpaa_sec_job *cf;
1524         struct dpaa_sec_op_ctx *ctx;
1525         struct qm_sg_entry *sg;
1526         rte_iova_t src_start_addr, dst_start_addr;
1527         uint32_t length = 0;
1528         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1529                         ses->iv.offset);
1530
1531         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1532         if (sym->m_dst)
1533                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1534         else
1535                 dst_start_addr = src_start_addr;
1536
1537         ctx = dpaa_sec_alloc_ctx(ses, 7);
1538         if (!ctx)
1539                 return NULL;
1540
1541         cf = &ctx->job;
1542         ctx->op = op;
1543
1544         /* input */
1545         rte_prefetch0(cf->sg);
1546         sg = &cf->sg[2];
1547         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1548         if (is_encode(ses)) {
1549                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1550                 sg->length = ses->iv.length;
1551                 length += sg->length;
1552                 cpu_to_hw_sg(sg);
1553
1554                 sg++;
1555                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1556                 sg->length = sym->auth.data.length;
1557                 length += sg->length;
1558                 sg->final = 1;
1559                 cpu_to_hw_sg(sg);
1560         } else {
1561                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1562                 sg->length = ses->iv.length;
1563                 length += sg->length;
1564                 cpu_to_hw_sg(sg);
1565
1566                 sg++;
1567
1568                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1569                 sg->length = sym->auth.data.length;
1570                 length += sg->length;
1571                 cpu_to_hw_sg(sg);
1572
1573                 memcpy(ctx->digest, sym->auth.digest.data,
1574                        ses->digest_length);
1575                 sg++;
1576
1577                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1578                 sg->length = ses->digest_length;
1579                 length += sg->length;
1580                 sg->final = 1;
1581                 cpu_to_hw_sg(sg);
1582         }
1583         /* input compound frame */
1584         cf->sg[1].length = length;
1585         cf->sg[1].extension = 1;
1586         cf->sg[1].final = 1;
1587         cpu_to_hw_sg(&cf->sg[1]);
1588
1589         /* output */
1590         sg++;
1591         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1592         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1593         sg->length = sym->cipher.data.length;
1594         length = sg->length;
1595         if (is_encode(ses)) {
1596                 cpu_to_hw_sg(sg);
1597                 /* set auth output */
1598                 sg++;
1599                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1600                 sg->length = ses->digest_length;
1601                 length += sg->length;
1602         }
1603         sg->final = 1;
1604         cpu_to_hw_sg(sg);
1605
1606         /* output compound frame */
1607         cf->sg[0].length = length;
1608         cf->sg[0].extension = 1;
1609         cpu_to_hw_sg(&cf->sg[0]);
1610
1611         return cf;
1612 }
1613
1614 static inline struct dpaa_sec_job *
1615 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1616 {
1617         struct rte_crypto_sym_op *sym = op->sym;
1618         struct dpaa_sec_job *cf;
1619         struct dpaa_sec_op_ctx *ctx;
1620         struct qm_sg_entry *sg;
1621         phys_addr_t src_start_addr, dst_start_addr;
1622
1623         ctx = dpaa_sec_alloc_ctx(ses, 2);
1624         if (!ctx)
1625                 return NULL;
1626         cf = &ctx->job;
1627         ctx->op = op;
1628
1629         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1630
1631         if (sym->m_dst)
1632                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1633         else
1634                 dst_start_addr = src_start_addr;
1635
1636         /* input */
1637         sg = &cf->sg[1];
1638         qm_sg_entry_set64(sg, src_start_addr);
1639         sg->length = sym->m_src->pkt_len;
1640         sg->final = 1;
1641         cpu_to_hw_sg(sg);
1642
1643         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1644         /* output */
1645         sg = &cf->sg[0];
1646         qm_sg_entry_set64(sg, dst_start_addr);
1647         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1648         cpu_to_hw_sg(sg);
1649
1650         return cf;
1651 }
1652
1653 static inline struct dpaa_sec_job *
1654 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1655 {
1656         struct rte_crypto_sym_op *sym = op->sym;
1657         struct dpaa_sec_job *cf;
1658         struct dpaa_sec_op_ctx *ctx;
1659         struct qm_sg_entry *sg, *out_sg, *in_sg;
1660         struct rte_mbuf *mbuf;
1661         uint8_t req_segs;
1662         uint32_t in_len = 0, out_len = 0;
1663
1664         if (sym->m_dst)
1665                 mbuf = sym->m_dst;
1666         else
1667                 mbuf = sym->m_src;
1668
1669         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1670         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1671                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1672                                 MAX_SG_ENTRIES);
1673                 return NULL;
1674         }
1675
1676         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1677         if (!ctx)
1678                 return NULL;
1679         cf = &ctx->job;
1680         ctx->op = op;
1681         /* output */
1682         out_sg = &cf->sg[0];
1683         out_sg->extension = 1;
1684         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1685
1686         /* 1st seg */
1687         sg = &cf->sg[2];
1688         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1689         sg->offset = 0;
1690
1691         /* Successive segs */
1692         while (mbuf->next) {
1693                 sg->length = mbuf->data_len;
1694                 out_len += sg->length;
1695                 mbuf = mbuf->next;
1696                 cpu_to_hw_sg(sg);
1697                 sg++;
1698                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1699                 sg->offset = 0;
1700         }
1701         sg->length = mbuf->buf_len - mbuf->data_off;
1702         out_len += sg->length;
1703         sg->final = 1;
1704         cpu_to_hw_sg(sg);
1705
1706         out_sg->length = out_len;
1707         cpu_to_hw_sg(out_sg);
1708
1709         /* input */
1710         mbuf = sym->m_src;
1711         in_sg = &cf->sg[1];
1712         in_sg->extension = 1;
1713         in_sg->final = 1;
1714         in_len = mbuf->data_len;
1715
1716         sg++;
1717         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1718
1719         /* 1st seg */
1720         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1721         sg->length = mbuf->data_len;
1722         sg->offset = 0;
1723
1724         /* Successive segs */
1725         mbuf = mbuf->next;
1726         while (mbuf) {
1727                 cpu_to_hw_sg(sg);
1728                 sg++;
1729                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1730                 sg->length = mbuf->data_len;
1731                 sg->offset = 0;
1732                 in_len += sg->length;
1733                 mbuf = mbuf->next;
1734         }
1735         sg->final = 1;
1736         cpu_to_hw_sg(sg);
1737
1738         in_sg->length = in_len;
1739         cpu_to_hw_sg(in_sg);
1740
1741         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1742
1743         return cf;
1744 }
1745
1746 static uint16_t
1747 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1748                        uint16_t nb_ops)
1749 {
1750         /* Function to transmit the frames to given device and queuepair */
1751         uint32_t loop;
1752         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1753         uint16_t num_tx = 0;
1754         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1755         uint32_t frames_to_send;
1756         struct rte_crypto_op *op;
1757         struct dpaa_sec_job *cf;
1758         dpaa_sec_session *ses;
1759         uint32_t auth_only_len;
1760         struct qman_fq *inq[DPAA_SEC_BURST];
1761
1762         while (nb_ops) {
1763                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1764                                 DPAA_SEC_BURST : nb_ops;
1765                 for (loop = 0; loop < frames_to_send; loop++) {
1766                         op = *(ops++);
1767                         switch (op->sess_type) {
1768                         case RTE_CRYPTO_OP_WITH_SESSION:
1769                                 ses = (dpaa_sec_session *)
1770                                         get_sym_session_private_data(
1771                                                         op->sym->session,
1772                                                         cryptodev_driver_id);
1773                                 break;
1774                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1775                                 ses = (dpaa_sec_session *)
1776                                         get_sec_session_private_data(
1777                                                         op->sym->sec_session);
1778                                 break;
1779                         default:
1780                                 DPAA_SEC_DP_ERR(
1781                                         "sessionless crypto op not supported");
1782                                 frames_to_send = loop;
1783                                 nb_ops = loop;
1784                                 goto send_pkts;
1785                         }
1786                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1787                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1788                                         frames_to_send = loop;
1789                                         nb_ops = loop;
1790                                         goto send_pkts;
1791                                 }
1792                         } else if (unlikely(ses->qp[rte_lcore_id() %
1793                                                 MAX_DPAA_CORES] != qp)) {
1794                                 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1795                                         " New qp = %p\n",
1796                                         ses->qp[rte_lcore_id() %
1797                                         MAX_DPAA_CORES], qp);
1798                                 frames_to_send = loop;
1799                                 nb_ops = loop;
1800                                 goto send_pkts;
1801                         }
1802
1803                         auth_only_len = op->sym->auth.data.length -
1804                                                 op->sym->cipher.data.length;
1805                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1806                                   ((op->sym->m_dst == NULL) ||
1807                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1808                                 if (is_proto_ipsec(ses)) {
1809                                         cf = build_proto(op, ses);
1810                                 } else if (is_proto_pdcp(ses)) {
1811                                         cf = build_proto(op, ses);
1812                                 } else if (is_auth_only(ses)) {
1813                                         cf = build_auth_only(op, ses);
1814                                 } else if (is_cipher_only(ses)) {
1815                                         cf = build_cipher_only(op, ses);
1816                                 } else if (is_aead(ses)) {
1817                                         cf = build_cipher_auth_gcm(op, ses);
1818                                         auth_only_len = ses->auth_only_len;
1819                                 } else if (is_auth_cipher(ses)) {
1820                                         cf = build_cipher_auth(op, ses);
1821                                 } else {
1822                                         DPAA_SEC_DP_ERR("not supported ops");
1823                                         frames_to_send = loop;
1824                                         nb_ops = loop;
1825                                         goto send_pkts;
1826                                 }
1827                         } else {
1828                                 if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
1829                                         cf = build_proto_sg(op, ses);
1830                                 } else if (is_auth_only(ses)) {
1831                                         cf = build_auth_only_sg(op, ses);
1832                                 } else if (is_cipher_only(ses)) {
1833                                         cf = build_cipher_only_sg(op, ses);
1834                                 } else if (is_aead(ses)) {
1835                                         cf = build_cipher_auth_gcm_sg(op, ses);
1836                                         auth_only_len = ses->auth_only_len;
1837                                 } else if (is_auth_cipher(ses)) {
1838                                         cf = build_cipher_auth_sg(op, ses);
1839                                 } else {
1840                                         DPAA_SEC_DP_ERR("not supported ops");
1841                                         frames_to_send = loop;
1842                                         nb_ops = loop;
1843                                         goto send_pkts;
1844                                 }
1845                         }
1846                         if (unlikely(!cf)) {
1847                                 frames_to_send = loop;
1848                                 nb_ops = loop;
1849                                 goto send_pkts;
1850                         }
1851
1852                         fd = &fds[loop];
1853                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1854                         fd->opaque_addr = 0;
1855                         fd->cmd = 0;
1856                         qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1857                         fd->_format1 = qm_fd_compound;
1858                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1859                         /* Auth_only_len is set as 0 in descriptor and it is
1860                          * overwritten here in the fd.cmd which will update
1861                          * the DPOVRD reg.
1862                          */
1863                         if (auth_only_len)
1864                                 fd->cmd = 0x80000000 | auth_only_len;
1865
1866                         /* In case of PDCP, per packet HFN is stored in
1867                          * mbuf priv after sym_op.
1868                          */
1869                         if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
1870                                 fd->cmd = 0x80000000 |
1871                                         *((uint32_t *)((uint8_t *)op +
1872                                         ses->pdcp.hfn_ovd_offset));
1873                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
1874                                         *((uint32_t *)((uint8_t *)op +
1875                                         ses->pdcp.hfn_ovd_offset)),
1876                                         ses->pdcp.hfn_ovd,
1877                                         is_proto_pdcp(ses));
1878                         }
1879
1880                 }
1881 send_pkts:
1882                 loop = 0;
1883                 while (loop < frames_to_send) {
1884                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1885                                         frames_to_send - loop);
1886                 }
1887                 nb_ops -= frames_to_send;
1888                 num_tx += frames_to_send;
1889         }
1890
1891         dpaa_qp->tx_pkts += num_tx;
1892         dpaa_qp->tx_errs += nb_ops - num_tx;
1893
1894         return num_tx;
1895 }
1896
1897 static uint16_t
1898 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1899                        uint16_t nb_ops)
1900 {
1901         uint16_t num_rx;
1902         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1903
1904         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1905
1906         dpaa_qp->rx_pkts += num_rx;
1907         dpaa_qp->rx_errs += nb_ops - num_rx;
1908
1909         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1910
1911         return num_rx;
1912 }
1913
1914 /** Release queue pair */
1915 static int
1916 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1917                             uint16_t qp_id)
1918 {
1919         struct dpaa_sec_dev_private *internals;
1920         struct dpaa_sec_qp *qp = NULL;
1921
1922         PMD_INIT_FUNC_TRACE();
1923
1924         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1925
1926         internals = dev->data->dev_private;
1927         if (qp_id >= internals->max_nb_queue_pairs) {
1928                 DPAA_SEC_ERR("Max supported qpid %d",
1929                              internals->max_nb_queue_pairs);
1930                 return -EINVAL;
1931         }
1932
1933         qp = &internals->qps[qp_id];
1934         rte_mempool_free(qp->ctx_pool);
1935         qp->internals = NULL;
1936         dev->data->queue_pairs[qp_id] = NULL;
1937
1938         return 0;
1939 }
1940
1941 /** Setup a queue pair */
1942 static int
1943 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1944                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1945                 __rte_unused int socket_id)
1946 {
1947         struct dpaa_sec_dev_private *internals;
1948         struct dpaa_sec_qp *qp = NULL;
1949         char str[20];
1950
1951         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1952
1953         internals = dev->data->dev_private;
1954         if (qp_id >= internals->max_nb_queue_pairs) {
1955                 DPAA_SEC_ERR("Max supported qpid %d",
1956                              internals->max_nb_queue_pairs);
1957                 return -EINVAL;
1958         }
1959
1960         qp = &internals->qps[qp_id];
1961         qp->internals = internals;
1962         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1963                         dev->data->dev_id, qp_id);
1964         if (!qp->ctx_pool) {
1965                 qp->ctx_pool = rte_mempool_create((const char *)str,
1966                                                         CTX_POOL_NUM_BUFS,
1967                                                         CTX_POOL_BUF_SIZE,
1968                                                         CTX_POOL_CACHE_SIZE, 0,
1969                                                         NULL, NULL, NULL, NULL,
1970                                                         SOCKET_ID_ANY, 0);
1971                 if (!qp->ctx_pool) {
1972                         DPAA_SEC_ERR("%s create failed\n", str);
1973                         return -ENOMEM;
1974                 }
1975         } else
1976                 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
1977                                 dev->data->dev_id, qp_id);
1978         dev->data->queue_pairs[qp_id] = qp;
1979
1980         return 0;
1981 }
1982
1983 /** Return the number of allocated queue pairs */
1984 static uint32_t
1985 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1986 {
1987         PMD_INIT_FUNC_TRACE();
1988
1989         return dev->data->nb_queue_pairs;
1990 }
1991
1992 /** Returns the size of session structure */
1993 static unsigned int
1994 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1995 {
1996         PMD_INIT_FUNC_TRACE();
1997
1998         return sizeof(dpaa_sec_session);
1999 }
2000
2001 static int
2002 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2003                      struct rte_crypto_sym_xform *xform,
2004                      dpaa_sec_session *session)
2005 {
2006         session->cipher_alg = xform->cipher.algo;
2007         session->iv.length = xform->cipher.iv.length;
2008         session->iv.offset = xform->cipher.iv.offset;
2009         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2010                                                RTE_CACHE_LINE_SIZE);
2011         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2012                 DPAA_SEC_ERR("No Memory for cipher key");
2013                 return -ENOMEM;
2014         }
2015         session->cipher_key.length = xform->cipher.key.length;
2016
2017         memcpy(session->cipher_key.data, xform->cipher.key.data,
2018                xform->cipher.key.length);
2019         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2020                         DIR_ENC : DIR_DEC;
2021
2022         return 0;
2023 }
2024
2025 static int
2026 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2027                    struct rte_crypto_sym_xform *xform,
2028                    dpaa_sec_session *session)
2029 {
2030         session->auth_alg = xform->auth.algo;
2031         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2032                                              RTE_CACHE_LINE_SIZE);
2033         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2034                 DPAA_SEC_ERR("No Memory for auth key");
2035                 return -ENOMEM;
2036         }
2037         session->auth_key.length = xform->auth.key.length;
2038         session->digest_length = xform->auth.digest_length;
2039
2040         memcpy(session->auth_key.data, xform->auth.key.data,
2041                xform->auth.key.length);
2042         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2043                         DIR_ENC : DIR_DEC;
2044
2045         return 0;
2046 }
2047
2048 static int
2049 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2050                    struct rte_crypto_sym_xform *xform,
2051                    dpaa_sec_session *session)
2052 {
2053         session->aead_alg = xform->aead.algo;
2054         session->iv.length = xform->aead.iv.length;
2055         session->iv.offset = xform->aead.iv.offset;
2056         session->auth_only_len = xform->aead.aad_length;
2057         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2058                                              RTE_CACHE_LINE_SIZE);
2059         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2060                 DPAA_SEC_ERR("No Memory for aead key\n");
2061                 return -ENOMEM;
2062         }
2063         session->aead_key.length = xform->aead.key.length;
2064         session->digest_length = xform->aead.digest_length;
2065
2066         memcpy(session->aead_key.data, xform->aead.key.data,
2067                xform->aead.key.length);
2068         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2069                         DIR_ENC : DIR_DEC;
2070
2071         return 0;
2072 }
2073
2074 static struct qman_fq *
2075 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2076 {
2077         unsigned int i;
2078
2079         for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2080                 if (qi->inq_attach[i] == 0) {
2081                         qi->inq_attach[i] = 1;
2082                         return &qi->inq[i];
2083                 }
2084         }
2085         DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);
2086
2087         return NULL;
2088 }
2089
2090 static int
2091 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2092 {
2093         unsigned int i;
2094
2095         for (i = 0; i < qi->max_nb_sessions; i++) {
2096                 if (&qi->inq[i] == fq) {
2097                         qman_retire_fq(fq, NULL);
2098                         qman_oos_fq(fq);
2099                         qi->inq_attach[i] = 0;
2100                         return 0;
2101                 }
2102         }
2103         return -1;
2104 }
2105
2106 static int
2107 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2108 {
2109         int ret;
2110
2111         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2112         ret = dpaa_sec_prep_cdb(sess);
2113         if (ret) {
2114                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2115                 return -1;
2116         }
2117         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2118                 ret = rte_dpaa_portal_init((void *)0);
2119                 if (ret) {
2120                         DPAA_SEC_ERR("Failure in affining portal");
2121                         return ret;
2122                 }
2123         }
2124         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2125                                dpaa_mem_vtop(&sess->cdb),
2126                                qman_fq_fqid(&qp->outq));
2127         if (ret)
2128                 DPAA_SEC_ERR("Unable to init sec queue");
2129
2130         return ret;
2131 }
2132
2133 static int
2134 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2135                             struct rte_crypto_sym_xform *xform, void *sess)
2136 {
2137         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2138         dpaa_sec_session *session = sess;
2139         uint32_t i;
2140
2141         PMD_INIT_FUNC_TRACE();
2142
2143         if (unlikely(sess == NULL)) {
2144                 DPAA_SEC_ERR("invalid session struct");
2145                 return -EINVAL;
2146         }
2147         memset(session, 0, sizeof(dpaa_sec_session));
2148
2149         /* Default IV length = 0 */
2150         session->iv.length = 0;
2151
2152         /* Cipher Only */
2153         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2154                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2155                 dpaa_sec_cipher_init(dev, xform, session);
2156
2157         /* Authentication Only */
2158         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2159                    xform->next == NULL) {
2160                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2161                 dpaa_sec_auth_init(dev, xform, session);
2162
2163         /* Cipher then Authenticate */
2164         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2165                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2166                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2167                         dpaa_sec_cipher_init(dev, xform, session);
2168                         dpaa_sec_auth_init(dev, xform->next, session);
2169                 } else {
2170                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2171                         return -EINVAL;
2172                 }
2173
2174         /* Authenticate then Cipher */
2175         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2176                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2177                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2178                         dpaa_sec_auth_init(dev, xform, session);
2179                         dpaa_sec_cipher_init(dev, xform->next, session);
2180                 } else {
2181                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2182                         return -EINVAL;
2183                 }
2184
2185         /* AEAD operation for AES-GCM kind of Algorithms */
2186         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2187                    xform->next == NULL) {
2188                 dpaa_sec_aead_init(dev, xform, session);
2189
2190         } else {
2191                 DPAA_SEC_ERR("Invalid crypto type");
2192                 return -EINVAL;
2193         }
2194         rte_spinlock_lock(&internals->lock);
2195         for (i = 0; i < MAX_DPAA_CORES; i++) {
2196                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2197                 if (session->inq[i] == NULL) {
2198                         DPAA_SEC_ERR("unable to attach sec queue");
2199                         rte_spinlock_unlock(&internals->lock);
2200                         goto err1;
2201                 }
2202         }
2203         rte_spinlock_unlock(&internals->lock);
2204
2205         return 0;
2206
2207 err1:
2208         rte_free(session->cipher_key.data);
2209         rte_free(session->auth_key.data);
2210         memset(session, 0, sizeof(dpaa_sec_session));
2211
2212         return -EINVAL;
2213 }
2214
2215 static int
2216 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2217                 struct rte_crypto_sym_xform *xform,
2218                 struct rte_cryptodev_sym_session *sess,
2219                 struct rte_mempool *mempool)
2220 {
2221         void *sess_private_data;
2222         int ret;
2223
2224         PMD_INIT_FUNC_TRACE();
2225
2226         if (rte_mempool_get(mempool, &sess_private_data)) {
2227                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2228                 return -ENOMEM;
2229         }
2230
2231         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2232         if (ret != 0) {
2233                 DPAA_SEC_ERR("failed to configure session parameters");
2234
2235                 /* Return session to mempool */
2236                 rte_mempool_put(mempool, sess_private_data);
2237                 return ret;
2238         }
2239
2240         set_sym_session_private_data(sess, dev->driver_id,
2241                         sess_private_data);
2242
2243
2244         return 0;
2245 }
2246
2247 static inline void
2248 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2249 {
2250         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2251         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2252         uint8_t i;
2253
2254         for (i = 0; i < MAX_DPAA_CORES; i++) {
2255                 if (s->inq[i])
2256                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2257                 s->inq[i] = NULL;
2258                 s->qp[i] = NULL;
2259         }
2260         rte_free(s->cipher_key.data);
2261         rte_free(s->auth_key.data);
2262         memset(s, 0, sizeof(dpaa_sec_session));
2263         rte_mempool_put(sess_mp, (void *)s);
2264 }
2265
2266 /** Clear the memory of session so it doesn't leave key material behind */
2267 static void
2268 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2269                 struct rte_cryptodev_sym_session *sess)
2270 {
2271         PMD_INIT_FUNC_TRACE();
2272         uint8_t index = dev->driver_id;
2273         void *sess_priv = get_sym_session_private_data(sess, index);
2274         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2275
2276         if (sess_priv) {
2277                 free_session_memory(dev, s);
2278                 set_sym_session_private_data(sess, index, NULL);
2279         }
2280 }
2281
2282 static int
2283 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2284                            struct rte_security_session_conf *conf,
2285                            void *sess)
2286 {
2287         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2288         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2289         struct rte_crypto_auth_xform *auth_xform = NULL;
2290         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2291         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2292         uint32_t i;
2293
2294         PMD_INIT_FUNC_TRACE();
2295
2296         memset(session, 0, sizeof(dpaa_sec_session));
2297         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2298                 cipher_xform = &conf->crypto_xform->cipher;
2299                 if (conf->crypto_xform->next)
2300                         auth_xform = &conf->crypto_xform->next->auth;
2301         } else {
2302                 auth_xform = &conf->crypto_xform->auth;
2303                 if (conf->crypto_xform->next)
2304                         cipher_xform = &conf->crypto_xform->next->cipher;
2305         }
2306         session->proto_alg = conf->protocol;
2307
2308         if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2309                 session->cipher_key.data = rte_zmalloc(NULL,
2310                                                        cipher_xform->key.length,
2311                                                        RTE_CACHE_LINE_SIZE);
2312                 if (session->cipher_key.data == NULL &&
2313                                 cipher_xform->key.length > 0) {
2314                         DPAA_SEC_ERR("No Memory for cipher key");
2315                         return -ENOMEM;
2316                 }
2317                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2318                                 cipher_xform->key.length);
2319                 session->cipher_key.length = cipher_xform->key.length;
2320
2321                 switch (cipher_xform->algo) {
2322                 case RTE_CRYPTO_CIPHER_AES_CBC:
2323                 case RTE_CRYPTO_CIPHER_3DES_CBC:
2324                 case RTE_CRYPTO_CIPHER_AES_CTR:
2325                         break;
2326                 default:
2327                         DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2328                                 cipher_xform->algo);
2329                         goto out;
2330                 }
2331                 session->cipher_alg = cipher_xform->algo;
2332         } else {
2333                 session->cipher_key.data = NULL;
2334                 session->cipher_key.length = 0;
2335                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2336         }
2337
2338         if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2339                 session->auth_key.data = rte_zmalloc(NULL,
2340                                                 auth_xform->key.length,
2341                                                 RTE_CACHE_LINE_SIZE);
2342                 if (session->auth_key.data == NULL &&
2343                                 auth_xform->key.length > 0) {
2344                         DPAA_SEC_ERR("No Memory for auth key");
2345                         rte_free(session->cipher_key.data);
2346                         return -ENOMEM;
2347                 }
2348                 memcpy(session->auth_key.data, auth_xform->key.data,
2349                                 auth_xform->key.length);
2350                 session->auth_key.length = auth_xform->key.length;
2351
2352                 switch (auth_xform->algo) {
2353                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2354                 case RTE_CRYPTO_AUTH_MD5_HMAC:
2355                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2356                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2357                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2358                 case RTE_CRYPTO_AUTH_AES_CMAC:
2359                         break;
2360                 default:
2361                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2362                                 auth_xform->algo);
2363                         goto out;
2364                 }
2365                 session->auth_alg = auth_xform->algo;
2366         } else {
2367                 session->auth_key.data = NULL;
2368                 session->auth_key.length = 0;
2369                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2370         }
2371
2372         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2373                 if (ipsec_xform->tunnel.type ==
2374                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2375                         memset(&session->encap_pdb, 0,
2376                                 sizeof(struct ipsec_encap_pdb) +
2377                                 sizeof(session->ip4_hdr));
2378                         session->ip4_hdr.ip_v = IPVERSION;
2379                         session->ip4_hdr.ip_hl = 5;
2380                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2381                                                 sizeof(session->ip4_hdr));
2382                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2383                         session->ip4_hdr.ip_id = 0;
2384                         session->ip4_hdr.ip_off = 0;
2385                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2386                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2387                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2388                                         IPPROTO_ESP : IPPROTO_AH;
2389                         session->ip4_hdr.ip_sum = 0;
2390                         session->ip4_hdr.ip_src =
2391                                         ipsec_xform->tunnel.ipv4.src_ip;
2392                         session->ip4_hdr.ip_dst =
2393                                         ipsec_xform->tunnel.ipv4.dst_ip;
2394                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2395                                                 (void *)&session->ip4_hdr,
2396                                                 sizeof(struct ip));
2397                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2398                 } else if (ipsec_xform->tunnel.type ==
2399                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2400                         memset(&session->encap_pdb, 0,
2401                                 sizeof(struct ipsec_encap_pdb) +
2402                                 sizeof(session->ip6_hdr));
2403                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2404                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2405                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2406                                         RTE_IPV6_HDR_TC_SHIFT) &
2407                                         RTE_IPV6_HDR_TC_MASK) |
2408                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2409                                         RTE_IPV6_HDR_FL_SHIFT) &
2410                                         RTE_IPV6_HDR_FL_MASK));
2411                         /* Payload length will be updated by HW */
2412                         session->ip6_hdr.payload_len = 0;
2413                         session->ip6_hdr.hop_limits =
2414                                         ipsec_xform->tunnel.ipv6.hlimit;
2415                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2416                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2417                                         IPPROTO_ESP : IPPROTO_AH;
2418                         memcpy(&session->ip6_hdr.src_addr,
2419                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2420                         memcpy(&session->ip6_hdr.dst_addr,
2421                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2422                         session->encap_pdb.ip_hdr_len =
2423                                                 sizeof(struct rte_ipv6_hdr);
2424                 }
2425                 session->encap_pdb.options =
2426                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2427                         PDBOPTS_ESP_OIHI_PDB_INL |
2428                         PDBOPTS_ESP_IVSRC |
2429                         PDBHMO_ESP_ENCAP_DTTL |
2430                         PDBHMO_ESP_SNR;
2431                 if (ipsec_xform->options.esn)
2432                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2433                 session->encap_pdb.spi = ipsec_xform->spi;
2434                 session->dir = DIR_ENC;
2435         } else if (ipsec_xform->direction ==
2436                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2437                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2438                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2439                         session->decap_pdb.options = sizeof(struct ip) << 16;
2440                 else
2441                         session->decap_pdb.options =
2442                                         sizeof(struct rte_ipv6_hdr) << 16;
2443                 if (ipsec_xform->options.esn)
2444                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2445                 session->dir = DIR_DEC;
2446         } else
2447                 goto out;
2448         rte_spinlock_lock(&internals->lock);
2449         for (i = 0; i < MAX_DPAA_CORES; i++) {
2450                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2451                 if (session->inq[i] == NULL) {
2452                         DPAA_SEC_ERR("unable to attach sec queue");
2453                         rte_spinlock_unlock(&internals->lock);
2454                         goto out;
2455                 }
2456         }
2457         rte_spinlock_unlock(&internals->lock);
2458
2459         return 0;
2460 out:
2461         rte_free(session->auth_key.data);
2462         rte_free(session->cipher_key.data);
2463         memset(session, 0, sizeof(dpaa_sec_session));
2464         return -1;
2465 }
2466
2467 static int
2468 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2469                           struct rte_security_session_conf *conf,
2470                           void *sess)
2471 {
2472         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2473         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2474         struct rte_crypto_auth_xform *auth_xform = NULL;
2475         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2476         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2477         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2478         uint32_t i;
2479
2480         PMD_INIT_FUNC_TRACE();
2481
2482         memset(session, 0, sizeof(dpaa_sec_session));
2483
2484         /* find xfrm types */
2485         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2486                 cipher_xform = &xform->cipher;
2487                 if (xform->next != NULL)
2488                         auth_xform = &xform->next->auth;
2489         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2490                 auth_xform = &xform->auth;
2491                 if (xform->next != NULL)
2492                         cipher_xform = &xform->next->cipher;
2493         } else {
2494                 DPAA_SEC_ERR("Invalid crypto type");
2495                 return -EINVAL;
2496         }
2497
2498         session->proto_alg = conf->protocol;
2499         if (cipher_xform) {
2500                 session->cipher_key.data = rte_zmalloc(NULL,
2501                                                cipher_xform->key.length,
2502                                                RTE_CACHE_LINE_SIZE);
2503                 if (session->cipher_key.data == NULL &&
2504                                 cipher_xform->key.length > 0) {
2505                         DPAA_SEC_ERR("No Memory for cipher key");
2506                         return -ENOMEM;
2507                 }
2508                 session->cipher_key.length = cipher_xform->key.length;
2509                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2510                         cipher_xform->key.length);
2511                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2512                                         DIR_ENC : DIR_DEC;
2513                 session->cipher_alg = cipher_xform->algo;
2514         } else {
2515                 session->cipher_key.data = NULL;
2516                 session->cipher_key.length = 0;
2517                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2518                 session->dir = DIR_ENC;
2519         }
2520
2521         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2522                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2523                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2524                         DPAA_SEC_ERR(
2525                                 "PDCP Seq Num size should be 5/12 bits for cmode");
2526                         goto out;
2527                 }
2528         }
2529
2530         if (auth_xform) {
2531                 session->auth_key.data = rte_zmalloc(NULL,
2532                                                      auth_xform->key.length,
2533                                                      RTE_CACHE_LINE_SIZE);
2534                 if (!session->auth_key.data &&
2535                     auth_xform->key.length > 0) {
2536                         DPAA_SEC_ERR("No Memory for auth key");
2537                         rte_free(session->cipher_key.data);
2538                         return -ENOMEM;
2539                 }
2540                 session->auth_key.length = auth_xform->key.length;
2541                 memcpy(session->auth_key.data, auth_xform->key.data,
2542                        auth_xform->key.length);
2543                 session->auth_alg = auth_xform->algo;
2544         } else {
2545                 session->auth_key.data = NULL;
2546                 session->auth_key.length = 0;
2547                 session->auth_alg = 0;
2548         }
2549         session->pdcp.domain = pdcp_xform->domain;
2550         session->pdcp.bearer = pdcp_xform->bearer;
2551         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2552         session->pdcp.sn_size = pdcp_xform->sn_size;
2553         session->pdcp.hfn = pdcp_xform->hfn;
2554         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2555         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2556         session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2557
2558         rte_spinlock_lock(&dev_priv->lock);
2559         for (i = 0; i < MAX_DPAA_CORES; i++) {
2560                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2561                 if (session->inq[i] == NULL) {
2562                         DPAA_SEC_ERR("unable to attach sec queue");
2563                         rte_spinlock_unlock(&dev_priv->lock);
2564                         goto out;
2565                 }
2566         }
2567         rte_spinlock_unlock(&dev_priv->lock);
2568         return 0;
2569 out:
2570         rte_free(session->auth_key.data);
2571         rte_free(session->cipher_key.data);
2572         memset(session, 0, sizeof(dpaa_sec_session));
2573         return -1;
2574 }
2575
2576 static int
2577 dpaa_sec_security_session_create(void *dev,
2578                                  struct rte_security_session_conf *conf,
2579                                  struct rte_security_session *sess,
2580                                  struct rte_mempool *mempool)
2581 {
2582         void *sess_private_data;
2583         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2584         int ret;
2585
2586         if (rte_mempool_get(mempool, &sess_private_data)) {
2587                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2588                 return -ENOMEM;
2589         }
2590
2591         switch (conf->protocol) {
2592         case RTE_SECURITY_PROTOCOL_IPSEC:
2593                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2594                                 sess_private_data);
2595                 break;
2596         case RTE_SECURITY_PROTOCOL_PDCP:
2597                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
2598                                 sess_private_data);
2599                 break;
2600         case RTE_SECURITY_PROTOCOL_MACSEC:
2601                 return -ENOTSUP;
2602         default:
2603                 return -EINVAL;
2604         }
2605         if (ret != 0) {
2606                 DPAA_SEC_ERR("failed to configure session parameters");
2607                 /* Return session to mempool */
2608                 rte_mempool_put(mempool, sess_private_data);
2609                 return ret;
2610         }
2611
2612         set_sec_session_private_data(sess, sess_private_data);
2613
2614         return ret;
2615 }
2616
2617 /** Clear the memory of session so it doesn't leave key material behind */
2618 static int
2619 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2620                 struct rte_security_session *sess)
2621 {
2622         PMD_INIT_FUNC_TRACE();
2623         void *sess_priv = get_sec_session_private_data(sess);
2624         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2625
2626         if (sess_priv) {
2627                 free_session_memory((struct rte_cryptodev *)dev, s);
2628                 set_sec_session_private_data(sess, NULL);
2629         }
2630         return 0;
2631 }
2632
2633 static int
2634 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2635                        struct rte_cryptodev_config *config __rte_unused)
2636 {
2637         PMD_INIT_FUNC_TRACE();
2638
2639         return 0;
2640 }
2641
2642 static int
2643 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2644 {
2645         PMD_INIT_FUNC_TRACE();
2646         return 0;
2647 }
2648
2649 static void
2650 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2651 {
2652         PMD_INIT_FUNC_TRACE();
2653 }
2654
2655 static int
2656 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2657 {
2658         PMD_INIT_FUNC_TRACE();
2659
2660         if (dev == NULL)
2661                 return -ENOMEM;
2662
2663         return 0;
2664 }
2665
2666 static void
2667 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2668                        struct rte_cryptodev_info *info)
2669 {
2670         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2671
2672         PMD_INIT_FUNC_TRACE();
2673         if (info != NULL) {
2674                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2675                 info->feature_flags = dev->feature_flags;
2676                 info->capabilities = dpaa_sec_capabilities;
2677                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2678                 info->driver_id = cryptodev_driver_id;
2679         }
2680 }
2681
2682 static struct rte_cryptodev_ops crypto_ops = {
2683         .dev_configure        = dpaa_sec_dev_configure,
2684         .dev_start            = dpaa_sec_dev_start,
2685         .dev_stop             = dpaa_sec_dev_stop,
2686         .dev_close            = dpaa_sec_dev_close,
2687         .dev_infos_get        = dpaa_sec_dev_infos_get,
2688         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
2689         .queue_pair_release   = dpaa_sec_queue_pair_release,
2690         .queue_pair_count     = dpaa_sec_queue_pair_count,
2691         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
2692         .sym_session_configure    = dpaa_sec_sym_session_configure,
2693         .sym_session_clear        = dpaa_sec_sym_session_clear
2694 };
2695
2696 static const struct rte_security_capability *
2697 dpaa_sec_capabilities_get(void *device __rte_unused)
2698 {
2699         return dpaa_sec_security_cap;
2700 }
2701
2702 static const struct rte_security_ops dpaa_sec_security_ops = {
2703         .session_create = dpaa_sec_security_session_create,
2704         .session_update = NULL,
2705         .session_stats_get = NULL,
2706         .session_destroy = dpaa_sec_security_session_destroy,
2707         .set_pkt_metadata = NULL,
2708         .capabilities_get = dpaa_sec_capabilities_get
2709 };
2710
2711 static int
2712 dpaa_sec_uninit(struct rte_cryptodev *dev)
2713 {
2714         struct dpaa_sec_dev_private *internals;
2715
2716         if (dev == NULL)
2717                 return -ENODEV;
2718
2719         internals = dev->data->dev_private;
2720         rte_free(dev->security_ctx);
2721
2722         rte_free(internals);
2723
2724         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2725                       dev->data->name, rte_socket_id());
2726
2727         return 0;
2728 }
2729
2730 static int
2731 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2732 {
2733         struct dpaa_sec_dev_private *internals;
2734         struct rte_security_ctx *security_instance;
2735         struct dpaa_sec_qp *qp;
2736         uint32_t i, flags;
2737         int ret;
2738
2739         PMD_INIT_FUNC_TRACE();
2740
2741         cryptodev->driver_id = cryptodev_driver_id;
2742         cryptodev->dev_ops = &crypto_ops;
2743
2744         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2745         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2746         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2747                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2748                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2749                         RTE_CRYPTODEV_FF_SECURITY |
2750                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2751                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2752                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2753                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2754                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2755
2756         internals = cryptodev->data->dev_private;
2757         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2758         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2759
2760         /*
2761          * For secondary processes, we don't initialise any further as primary
2762          * has already done this work. Only check we don't need a different
2763          * RX function
2764          */
2765         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2766                 DPAA_SEC_WARN("Device already init by primary process");
2767                 return 0;
2768         }
2769
2770         /* Initialize security_ctx only for primary process*/
2771         security_instance = rte_malloc("rte_security_instances_ops",
2772                                 sizeof(struct rte_security_ctx), 0);
2773         if (security_instance == NULL)
2774                 return -ENOMEM;
2775         security_instance->device = (void *)cryptodev;
2776         security_instance->ops = &dpaa_sec_security_ops;
2777         security_instance->sess_cnt = 0;
2778         cryptodev->security_ctx = security_instance;
2779
2780         rte_spinlock_init(&internals->lock);
2781         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2782                 /* init qman fq for queue pair */
2783                 qp = &internals->qps[i];
2784                 ret = dpaa_sec_init_tx(&qp->outq);
2785                 if (ret) {
2786                         DPAA_SEC_ERR("config tx of queue pair  %d", i);
2787                         goto init_error;
2788                 }
2789         }

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}
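	/*
	 * Note: QMAN_FQ_FLAG_DYNAMIC_FQID makes QMan allocate the frame queue
	 * IDs itself, which is why fqid 0 is passed to qman_create_fq() above.
	 * These input queues are only paired with SEC sessions later, when a
	 * session is attached to a queue pair.
	 */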

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
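
/*
 * Illustrative sketch (not part of the driver, and hypothetical): the usual
 * application-side flow once this device exists. Exact qp_conf fields vary
 * between DPDK releases, and the descriptor count is an assumption.
 *
 *	struct rte_cryptodev_config dev_conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &dev_conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *	rte_cryptodev_start(dev_id);
 */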

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
			dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memory for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* If the SEC device version (era) is not yet configured, probe it
	 * from the "fsl,sec-era" property of the CAAM device-tree node.
	 */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, clean up what was allocated above */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
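
/*
 * Illustrative sketch (not part of the driver): once the DPAA bus has probed
 * the device, an application can resolve its ID from the "dpaa_sec-N" name
 * assigned in cryptodev_dpaa_sec_probe() above.
 *
 *	int dev_id = rte_cryptodev_get_dev_id("dpaa_sec-0");
 *
 *	if (dev_id < 0)
 *		;	// no such device was probed
 */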

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}
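
/*
 * Note: since the log type is registered as "pmd.crypto.dpaa" with a default
 * level of NOTICE, more verbose driver logs can be requested at run time via
 * the EAL --log-level option, e.g. --log-level=pmd.crypto.dpaa:debug (the
 * exact option syntax depends on the DPDK release in use).
 */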