/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
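
/*
 * Note: dpaa_sec_ops/dpaa_sec_op_nb form a per-thread staging area:
 * the dequeue callback below (dqrr_out_fq_cb_rx) appends completed
 * ops here, deferring further dequeues once DPAA_SEC_BURST is hit.
 */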

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }

        /* report op status to sym->op and then free the ctx memory */
        rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
        struct dpaa_sec_op_ctx *ctx;
        int retval;

        retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
        if (!ctx || retval) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes, hence it is called
         * four times to clear all the SG entries. dpaa_sec_alloc_ctx()
         * runs for every packet, and memset() is costlier than
         * dcbz_64().
         */
        dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

        ctx->ctx_pool = ses->ctx_pool;
        ctx->vtop_offset = (size_t) ctx
                                - rte_mempool_virt2iova(ctx);

        return ctx;
}
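
/*
 * Sizing recap for dpaa_sec_alloc_ctx() (derived from the comment
 * above, not new behavior): 16 SG entries x 16 B = 256 B = 4 x 64 B
 * cache lines, hence exactly four dcbz_64() calls. ctx->vtop_offset
 * caches the (virtual - IOVA) delta of the ctx so address conversions
 * within it reduce to a subtraction; its consumers are outside this
 * excerpt.
 */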

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
        const struct rte_memseg *ms;

        ms = rte_mem_virt2memseg(vaddr, NULL);
        if (ms) {
                dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
                return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
        }
        return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
        void *va;

        va = (void *)dpaax_iova_table_get_va(paddr);
        if (likely(va))
                return va;

        return rte_mem_iova2virt(paddr);
}
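
/*
 * Both helpers above lean on the dpaax IOVA table as a fast path:
 * dpaa_mem_vtop() also refreshes the table for the memseg it hits,
 * while dpaa_mem_ptov() falls back to the slower rte_mem_iova2virt()
 * lookup only when the table has no mapping for the address.
 */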

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
                        fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as the destination so
 * that all packets on this queue are dispatched into CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = qm_channel_caam;
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                DPAA_SEC_ERR("qman_init_fq failed %d", ret);

        return ret;
}
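
/*
 * Illustrative wiring of the rx/tx FQ pair (a sketch; the actual
 * call sites are outside this excerpt, and the 'inq' field name is
 * assumed for illustration):
 *
 *   dpaa_sec_init_tx(&qp->outq);
 *   dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&ses->cdb),
 *                    qp->outq.fqid);
 *
 * i.e. context_a receives the IOVA of the session's shared
 * descriptor (sec_cdb) and context_b the FQID that CAAM should
 * enqueue results to.
 */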

/* frames enqueued on in_fq are processed by CAAM, which puts the
 * crypto result on out_fq
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* The sg table is embedded in an op ctx:
         * sg[0] is for output,
         * sg[1] is for input.
         */
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;
                struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
                                ctx->op->sym->m_src : ctx->op->sym->m_dst;

                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                mbuf->pkt_len = len;
                while (mbuf->next != NULL) {
                        len -= mbuf->data_len;
                        mbuf = mbuf->next;
                }
                mbuf->data_len = len;
        }
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}
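
/*
 * Worked example of the trailing-length fix-up above (numbers are
 * illustrative only): if SEC reports sg_out->length = 120 for a
 * two-segment chain whose first segment has data_len = 64, the walk
 * leaves the last segment with data_len = 120 - 64 = 56 while the
 * head keeps pkt_len = 120, matching the protocol-offload output.
 */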

/* CAAM results are put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("unable to init caam source fq!");
                return ret;
        }

        return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
                (ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
                (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
        return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_proto_pdcp(dpaa_sec_session *ses)
{
        return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}
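
/*
 * These predicates drive the descriptor selection in
 * dpaa_sec_prep_cdb() below: proto sessions (IPsec/PDCP) get their
 * dedicated CDB builders first, then cipher-only, auth-only and AEAD
 * sessions, and finally the combined cipher+auth (authenc) path.
 */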

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
        switch (ses->auth_alg) {
        case RTE_CRYPTO_AUTH_NULL:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_NULL : 0;
                ses->digest_length = 0;
                break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        default:
                DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
        }
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
        switch (ses->cipher_alg) {
        case RTE_CRYPTO_CIPHER_NULL:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_NULL : 0;
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CTR;
                break;
        default:
                DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
        }
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
        switch (ses->aead_alg) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                alginfo->algtype = OP_ALG_ALGSEL_AES;
                alginfo->algmode = OP_ALG_AAI_GCM;
                break;
        default:
                DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
        }
}

static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
        struct alginfo authdata = {0}, cipherdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        struct alginfo *p_authdata = NULL;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        switch (ses->cipher_alg) {
        case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
                break;
        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
                break;
        case RTE_CRYPTO_CIPHER_NULL:
                cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
                break;
        default:
                DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
                              ses->cipher_alg);
                return -1;
        }

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;

        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;

        if (ses->auth_alg) {
                switch (ses->auth_alg) {
                case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
                        authdata.algtype = PDCP_AUTH_TYPE_SNOW;
                        break;
                case RTE_CRYPTO_AUTH_ZUC_EIA3:
                        authdata.algtype = PDCP_AUTH_TYPE_ZUC;
                        break;
                case RTE_CRYPTO_AUTH_AES_CMAC:
                        authdata.algtype = PDCP_AUTH_TYPE_AES;
                        break;
                case RTE_CRYPTO_AUTH_NULL:
                        authdata.algtype = PDCP_AUTH_TYPE_NULL;
                        break;
                default:
                        DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                                      ses->auth_alg);
                        return -1;
                }

                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;

                p_authdata = &authdata;

                cdb->sh_desc[1] = authdata.keylen;
        }

        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               MIN_JOB_DESC_SIZE,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);
        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }

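        /*
         * rta_inline_query() convention as used throughout this file:
         * sh_desc[0]/[1] carry the cipher/auth key lengths in, and
         * sh_desc[2] comes back as a bitmask -- bit 0 set means the
         * cipher key can stay immediate (inline), bit 1 the auth key;
         * a clear bit means the key must be referenced by pointer,
         * hence the RTA_DATA_PTR conversions below.
         */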
        if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
                cipherdata.key =
                        (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
                authdata.key =
                        (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;

        if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
        } else {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, p_authdata, 0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, p_authdata, 0);
        }

        return shared_desc_len;
}
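
/*
 * Summary of the dispatch above: RTE_SECURITY_PDCP_MODE_CONTROL
 * sessions use the c-plane shared descriptors with both cipherdata
 * and authdata, while all other modes take the u-plane variants,
 * where p_authdata stays NULL unless an auth algorithm was set.
 */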

/* prepare the IPsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
        struct alginfo cipherdata = {0}, authdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        caam_cipher_alg(ses, &cipherdata);
        if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                DPAA_SEC_ERR("not supported cipher alg");
                return -ENOTSUP;
        }

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;

        caam_auth_alg(ses, &authdata);
        if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                DPAA_SEC_ERR("not supported auth alg");
                return -ENOTSUP;
        }

        authdata.key = (size_t)ses->auth_key.data;
        authdata.keylen = ses->auth_key.length;
        authdata.key_enc_flags = 0;
        authdata.key_type = RTA_DATA_IMM;

        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = authdata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               MIN_JOB_DESC_SIZE,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);

        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }
        if (cdb->sh_desc[2] & 1)
                cipherdata.key_type = RTA_DATA_IMM;
        else {
                cipherdata.key = (size_t)dpaa_mem_vtop(
                                        (void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (cdb->sh_desc[2] & (1 << 1))
                authdata.key_type = RTA_DATA_IMM;
        else {
                authdata.key = (size_t)dpaa_mem_vtop(
                                        (void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        if (ses->dir == DIR_ENC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->encap_pdb,
                                (uint8_t *)&ses->ip4_hdr,
                                &cipherdata, &authdata);
        } else if (ses->dir == DIR_DEC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->decap_pdb,
                                &cipherdata, &authdata);
        }
        return shared_desc_len;
}

/* prepare the command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        int32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        if (is_proto_ipsec(ses)) {
                shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
        } else if (is_proto_pdcp(ses)) {
                shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
        } else if (is_cipher_only(ses)) {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported cipher alg");
                        return -ENOTSUP;
                }

                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_blkcipher(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_c,
                                                NULL,
                                                ses->iv.length,
                                                ses->dir);
        } else if (is_auth_only(ses)) {
                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported auth alg");
                        return -ENOTSUP;
                }

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
                                                   swap, SHR_NEVER, &alginfo_a,
                                                   !ses->dir,
                                                   ses->digest_length);
        } else if (is_aead(ses)) {
                caam_aead_alg(ses, &alginfo);
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported aead alg");
                        return -ENOTSUP;
                }
                alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
        } else {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported cipher alg");
                        return -ENOTSUP;
                }

                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported auth alg");
                        return -ENOTSUP;
                }

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       MIN_JOB_DESC_SIZE,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (size_t)dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1 << 1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (size_t)dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
                /* auth_only_len is set to 0 here; it will be
                 * overwritten in the FD for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
                                ses->iv.length, 0,
                                ses->digest_length, ses->dir);
        }

        if (shared_desc_len < 0) {
                DPAA_SEC_ERR("error in preparing command block");
                return shared_desc_len;
        }

        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}
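
/*
 * The final header words above are converted with rte_cpu_to_be_32()
 * before use; the conversion suggests SEC consumes the shared
 * descriptor header big-endian, with idlen carrying the length
 * returned by the cnstr_shdsc_* builder.
 */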

/* qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers, we ask for the
         * exact count by setting the QM_VDQCR_EXACT flag. Otherwise
         * the flag is left unset, in which case the dequeue may
         * deliver up to two more buffers than requested, so we
         * request two fewer in that case.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
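        /*
         * Illustrative sizing (example numbers, not from this file):
         * nb_ops = 3 requests exactly 3 frames via QM_VDQCR_EXACT;
         * nb_ops = 32 requests 30 and may receive up to 32; larger
         * requests are clamped to DPAA_MAX_DEQUEUE_NUM_FRAMES - 2.
         */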
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* The sg table is embedded in an op ctx:
                 * sg[0] is for output,
                 * sg[1] is for input.
                 */
                job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;
                        struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
                                                op->sym->m_src : op->sym->m_dst;

                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        mbuf->pkt_len = len;
                        while (mbuf->next != NULL) {
                                len -= mbuf->data_len;
                                mbuf = mbuf->next;
                        }
                        mbuf->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* report op status to sym->op and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = sym->auth.data.length;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                start_addr = dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        } else {
                /* Digest calculation case */
                sg->length -= ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}
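
/*
 * Compound frame built above (auth-only, scatter-gather case):
 *   sg[0] (out) -> the digest buffer;
 *   sg[1] (in)  -> extension list from sg[2]: the payload segments,
 *                  plus a saved copy of the received digest appended
 *                  when decoding so SEC verifies it in hardware.
 */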

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];
        if (is_decode(ses)) {
                /* need to extend the input to a compound frame */
                sg->extension = 1;
                qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
                sg->length = sym->auth.data.length + ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);

                sg = &cf->sg[2];
                /* hash result or digest: save the received digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                           ses->digest_length);
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                cpu_to_hw_sg(sg);

                /* let the hardware verify the digest */
                start_addr = dpaa_mem_vtop(old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }

        return cf;
}
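
/*
 * Same idea as the SG variant above, for contiguous buffers: on
 * decode the input frame is extended to {data, saved digest} so the
 * hardware performs the comparison itself; on encode the input is
 * just the data range and the digest is produced into sg[0].
 */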

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }

        if (req_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = sym->cipher.data.length;
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->cipher.data.offset;
        sg->offset = sym->cipher.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = sym->cipher.data.length + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->cipher.data.offset;
        sg->offset = sym->cipher.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = sym->cipher.data.length + ses->iv.length;
        qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}
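
/*
 * Cipher-only compound frame (contiguous case): input sg[1] extends
 * to {IV, payload} starting at sg[2], while output sg[0] points
 * straight into the destination mbuf (m_dst, or in place within
 * m_src when m_dst is NULL).
 */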

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (ses->auth_only_len)
                req_segs++;

        if (req_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->aead.data.length + ses->auth_only_len
                                                + ses->digest_length;
        else
                out_sg->length = sym->aead.data.length + ses->auth_only_len;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset +
                                        ses->auth_only_len;
        sg->offset = sym->aead.data.offset - ses->auth_only_len;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->aead.data.length
                                                        + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + sym->aead.data.length
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->aead.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
        sg->length = sym->aead.data.length + ses->auth_only_len;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}
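
/*
 * GCM compound frames above: the input side is {IV, optional AAD of
 * auth_only_len bytes, payload}, plus the saved digest when
 * decoding; the output side starts auth_only_len bytes before
 * aead.data.offset and carries an extra digest entry when encoding.
 */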
1393
1394 static inline struct dpaa_sec_job *
1395 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1396 {
1397         struct rte_crypto_sym_op *sym = op->sym;
1398         struct dpaa_sec_job *cf;
1399         struct dpaa_sec_op_ctx *ctx;
1400         struct qm_sg_entry *sg, *out_sg, *in_sg;
1401         struct rte_mbuf *mbuf;
1402         uint8_t req_segs;
1403         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1404                         ses->iv.offset);
1405
1406         if (sym->m_dst) {
1407                 mbuf = sym->m_dst;
1408                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1409         } else {
1410                 mbuf = sym->m_src;
1411                 req_segs = mbuf->nb_segs * 2 + 4;
1412         }
1413
1414         if (req_segs > MAX_SG_ENTRIES) {
1415                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1416                                 MAX_SG_ENTRIES);
1417                 return NULL;
1418         }
1419
1420         ctx = dpaa_sec_alloc_ctx(ses);
1421         if (!ctx)
1422                 return NULL;
1423
1424         cf = &ctx->job;
1425         ctx->op = op;
1426
1427         rte_prefetch0(cf->sg);
1428
1429         /* output */
1430         out_sg = &cf->sg[0];
1431         out_sg->extension = 1;
1432         if (is_encode(ses))
1433                 out_sg->length = sym->auth.data.length + ses->digest_length;
1434         else
1435                 out_sg->length = sym->auth.data.length;
1436
1437         /* output sg entries */
1438         sg = &cf->sg[2];
1439         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1440         cpu_to_hw_sg(out_sg);
1441
1442         /* 1st seg */
1443         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1444         sg->length = mbuf->data_len - sym->auth.data.offset;
1445         sg->offset = sym->auth.data.offset;
1446
1447         /* Successive segs */
1448         mbuf = mbuf->next;
1449         while (mbuf) {
1450                 cpu_to_hw_sg(sg);
1451                 sg++;
1452                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1453                 sg->length = mbuf->data_len;
1454                 mbuf = mbuf->next;
1455         }
1456         sg->length -= ses->digest_length;
1457
1458         if (is_encode(ses)) {
1459                 cpu_to_hw_sg(sg);
1460                 /* set auth output */
1461                 sg++;
1462                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1463                 sg->length = ses->digest_length;
1464         }
1465         sg->final = 1;
1466         cpu_to_hw_sg(sg);
1467
1468         /* input */
1469         mbuf = sym->m_src;
1470         in_sg = &cf->sg[1];
1471         in_sg->extension = 1;
1472         in_sg->final = 1;
1473         if (is_encode(ses))
1474                 in_sg->length = ses->iv.length + sym->auth.data.length;
1475         else
1476                 in_sg->length = ses->iv.length + sym->auth.data.length
1477                                                 + ses->digest_length;
1478
1479         /* input sg entries */
1480         sg++;
1481         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1482         cpu_to_hw_sg(in_sg);
1483
1484         /* 1st seg IV */
1485         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1486         sg->length = ses->iv.length;
1487         cpu_to_hw_sg(sg);
1488
1489         /* 2nd seg */
1490         sg++;
1491         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1492         sg->length = mbuf->data_len - sym->auth.data.offset;
1493         sg->offset = sym->auth.data.offset;
1494
1495         /* Successive segs */
1496         mbuf = mbuf->next;
1497         while (mbuf) {
1498                 cpu_to_hw_sg(sg);
1499                 sg++;
1500                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1501                 sg->length = mbuf->data_len;
1502                 mbuf = mbuf->next;
1503         }
1504
1505         sg->length -= ses->digest_length;
1506         if (is_decode(ses)) {
1507                 cpu_to_hw_sg(sg);
1508                 sg++;
1509                 memcpy(ctx->digest, sym->auth.digest.data,
1510                         ses->digest_length);
1511                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1512                 sg->length = ses->digest_length;
1513         }
1514         sg->final = 1;
1515         cpu_to_hw_sg(sg);
1516
1517         return cf;
1518 }
1519
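/*
 * Contiguous-buffer variant of the chained cipher+auth job: the compound
 * frame's input chain is IV + auth data (+ digest for decode) and its
 * output chain is the cipher data (+ digest for encode), all taken from a
 * single input (and optional output) mbuf.
 */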
1520 static inline struct dpaa_sec_job *
1521 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1522 {
1523         struct rte_crypto_sym_op *sym = op->sym;
1524         struct dpaa_sec_job *cf;
1525         struct dpaa_sec_op_ctx *ctx;
1526         struct qm_sg_entry *sg;
1527         rte_iova_t src_start_addr, dst_start_addr;
1528         uint32_t length = 0;
1529         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1530                         ses->iv.offset);
1531
1532         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1533         if (sym->m_dst)
1534                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1535         else
1536                 dst_start_addr = src_start_addr;
1537
1538         ctx = dpaa_sec_alloc_ctx(ses);
1539         if (!ctx)
1540                 return NULL;
1541
1542         cf = &ctx->job;
1543         ctx->op = op;
1544
1545         /* input */
1546         rte_prefetch0(cf->sg);
1547         sg = &cf->sg[2];
1548         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1549         if (is_encode(ses)) {
1550                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1551                 sg->length = ses->iv.length;
1552                 length += sg->length;
1553                 cpu_to_hw_sg(sg);
1554
1555                 sg++;
1556                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1557                 sg->length = sym->auth.data.length;
1558                 length += sg->length;
1559                 sg->final = 1;
1560                 cpu_to_hw_sg(sg);
1561         } else {
1562                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1563                 sg->length = ses->iv.length;
1564                 length += sg->length;
1565                 cpu_to_hw_sg(sg);
1566
1567                 sg++;
1568
1569                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1570                 sg->length = sym->auth.data.length;
1571                 length += sg->length;
1572                 cpu_to_hw_sg(sg);
1573
1574                 memcpy(ctx->digest, sym->auth.digest.data,
1575                        ses->digest_length);
1576                 sg++;
1577
1578                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1579                 sg->length = ses->digest_length;
1580                 length += sg->length;
1581                 sg->final = 1;
1582                 cpu_to_hw_sg(sg);
1583         }
1584         /* input compound frame */
1585         cf->sg[1].length = length;
1586         cf->sg[1].extension = 1;
1587         cf->sg[1].final = 1;
1588         cpu_to_hw_sg(&cf->sg[1]);
1589
1590         /* output */
1591         sg++;
1592         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1593         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1594         sg->length = sym->cipher.data.length;
1595         length = sg->length;
1596         if (is_encode(ses)) {
1597                 cpu_to_hw_sg(sg);
1598                 /* set auth output */
1599                 sg++;
1600                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1601                 sg->length = ses->digest_length;
1602                 length += sg->length;
1603         }
1604         sg->final = 1;
1605         cpu_to_hw_sg(sg);
1606
1607         /* output compound frame */
1608         cf->sg[0].length = length;
1609         cf->sg[0].extension = 1;
1610         cpu_to_hw_sg(&cf->sg[0]);
1611
1612         return cf;
1613 }
1614
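/*
 * Build a protocol-offload (IPsec/PDCP) job for contiguous mbufs: the
 * whole packet is handed to SEC as one input frame and one output frame,
 * and the hardware applies the full protocol transform.
 */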
1615 static inline struct dpaa_sec_job *
1616 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1617 {
1618         struct rte_crypto_sym_op *sym = op->sym;
1619         struct dpaa_sec_job *cf;
1620         struct dpaa_sec_op_ctx *ctx;
1621         struct qm_sg_entry *sg;
1622         phys_addr_t src_start_addr, dst_start_addr;
1623
1624         ctx = dpaa_sec_alloc_ctx(ses);
1625         if (!ctx)
1626                 return NULL;
1627         cf = &ctx->job;
1628         ctx->op = op;
1629
1630         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1631
1632         if (sym->m_dst)
1633                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1634         else
1635                 dst_start_addr = src_start_addr;
1636
1637         /* input */
1638         sg = &cf->sg[1];
1639         qm_sg_entry_set64(sg, src_start_addr);
1640         sg->length = sym->m_src->pkt_len;
1641         sg->final = 1;
1642         cpu_to_hw_sg(sg);
1643
1644         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1645         /* output */
1646         sg = &cf->sg[0];
1647         qm_sg_entry_set64(sg, dst_start_addr);
1648         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1649         cpu_to_hw_sg(sg);
1650
1651         return cf;
1652 }
1653
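/*
 * Scatter-gather variant of the protocol-offload job: input and output SG
 * chains cover every mbuf segment; the last output segment is sized to
 * the remaining buffer space so that a transformed (possibly larger)
 * packet still fits.
 */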
1654 static inline struct dpaa_sec_job *
1655 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1656 {
1657         struct rte_crypto_sym_op *sym = op->sym;
1658         struct dpaa_sec_job *cf;
1659         struct dpaa_sec_op_ctx *ctx;
1660         struct qm_sg_entry *sg, *out_sg, *in_sg;
1661         struct rte_mbuf *mbuf;
1662         uint8_t req_segs;
1663         uint32_t in_len = 0, out_len = 0;
1664
1665         if (sym->m_dst)
1666                 mbuf = sym->m_dst;
1667         else
1668                 mbuf = sym->m_src;
1669
1670         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1671         if (req_segs > MAX_SG_ENTRIES) {
1672                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1673                                 MAX_SG_ENTRIES);
1674                 return NULL;
1675         }
1676
1677         ctx = dpaa_sec_alloc_ctx(ses);
1678         if (!ctx)
1679                 return NULL;
1680         cf = &ctx->job;
1681         ctx->op = op;
1682         /* output */
1683         out_sg = &cf->sg[0];
1684         out_sg->extension = 1;
1685         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1686
1687         /* 1st seg */
1688         sg = &cf->sg[2];
1689         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1690         sg->offset = 0;
1691
1692         /* Successive segs */
1693         while (mbuf->next) {
1694                 sg->length = mbuf->data_len;
1695                 out_len += sg->length;
1696                 mbuf = mbuf->next;
1697                 cpu_to_hw_sg(sg);
1698                 sg++;
1699                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1700                 sg->offset = 0;
1701         }
1702         sg->length = mbuf->buf_len - mbuf->data_off;
1703         out_len += sg->length;
1704         sg->final = 1;
1705         cpu_to_hw_sg(sg);
1706
1707         out_sg->length = out_len;
1708         cpu_to_hw_sg(out_sg);
1709
1710         /* input */
1711         mbuf = sym->m_src;
1712         in_sg = &cf->sg[1];
1713         in_sg->extension = 1;
1714         in_sg->final = 1;
1715         in_len = mbuf->data_len;
1716
1717         sg++;
1718         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1719
1720         /* 1st seg */
1721         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1722         sg->length = mbuf->data_len;
1723         sg->offset = 0;
1724
1725         /* Successive segs */
1726         mbuf = mbuf->next;
1727         while (mbuf) {
1728                 cpu_to_hw_sg(sg);
1729                 sg++;
1730                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1731                 sg->length = mbuf->data_len;
1732                 sg->offset = 0;
1733                 in_len += sg->length;
1734                 mbuf = mbuf->next;
1735         }
1736         sg->final = 1;
1737         cpu_to_hw_sg(sg);
1738
1739         in_sg->length = in_len;
1740         cpu_to_hw_sg(in_sg);
1741
1742         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1743
1744         return cf;
1745 }
1746
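/*
 * Enqueue crypto ops towards SEC: resolve each op's session, lazily attach
 * the session to this queue pair on first use, build the compound frame
 * matching the requested operation, and push the frames to QMan in bursts
 * of up to DPAA_SEC_BURST. Illustrative caller-side sketch using the
 * generic cryptodev API (not part of this driver):
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	// ops[sent..nb_ops-1] were not accepted; retry or free them
 */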
1747 static uint16_t
1748 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1749                        uint16_t nb_ops)
1750 {
1751         /* Transmit the frames to the given device and queue pair */
1752         uint32_t loop;
1753         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1754         uint16_t num_tx = 0;
1755         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1756         uint32_t frames_to_send;
1757         struct rte_crypto_op *op;
1758         struct dpaa_sec_job *cf;
1759         dpaa_sec_session *ses;
1760         uint32_t auth_only_len;
1761         struct qman_fq *inq[DPAA_SEC_BURST];
1762
1763         while (nb_ops) {
1764                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1765                                 DPAA_SEC_BURST : nb_ops;
1766                 for (loop = 0; loop < frames_to_send; loop++) {
1767                         op = *(ops++);
1768                         switch (op->sess_type) {
1769                         case RTE_CRYPTO_OP_WITH_SESSION:
1770                                 ses = (dpaa_sec_session *)
1771                                         get_sym_session_private_data(
1772                                                         op->sym->session,
1773                                                         cryptodev_driver_id);
1774                                 break;
1775                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1776                                 ses = (dpaa_sec_session *)
1777                                         get_sec_session_private_data(
1778                                                         op->sym->sec_session);
1779                                 break;
1780                         default:
1781                                 DPAA_SEC_DP_ERR(
1782                                         "sessionless crypto op not supported");
1783                                 frames_to_send = loop;
1784                                 nb_ops = loop;
1785                                 goto send_pkts;
1786                         }
1787                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1788                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1789                                         frames_to_send = loop;
1790                                         nb_ops = loop;
1791                                         goto send_pkts;
1792                                 }
1793                         } else if (unlikely(ses->qp[rte_lcore_id() %
1794                                                 MAX_DPAA_CORES] != qp)) {
1795                                 DPAA_SEC_DP_ERR("Old: sess->qp = %p,"
1796                                         " new qp = %p\n",
1797                                         ses->qp[rte_lcore_id() %
1798                                         MAX_DPAA_CORES], qp);
1799                                 frames_to_send = loop;
1800                                 nb_ops = loop;
1801                                 goto send_pkts;
1802                         }
1803
1804                         auth_only_len = op->sym->auth.data.length -
1805                                                 op->sym->cipher.data.length;
1806                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1807                                   ((op->sym->m_dst == NULL) ||
1808                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1809                                 if (is_proto_ipsec(ses) ||
1810                                     is_proto_pdcp(ses)) {
1811                                         cf = build_proto(op, ses);
1813                                 } else if (is_auth_only(ses)) {
1814                                         cf = build_auth_only(op, ses);
1815                                 } else if (is_cipher_only(ses)) {
1816                                         cf = build_cipher_only(op, ses);
1817                                 } else if (is_aead(ses)) {
1818                                         cf = build_cipher_auth_gcm(op, ses);
1819                                         auth_only_len = ses->auth_only_len;
1820                                 } else if (is_auth_cipher(ses)) {
1821                                         cf = build_cipher_auth(op, ses);
1822                                 } else {
1823                                         DPAA_SEC_DP_ERR("unsupported crypto operation");
1824                                         frames_to_send = loop;
1825                                         nb_ops = loop;
1826                                         goto send_pkts;
1827                                 }
1828                         } else {
1829                                 if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
1830                                         cf = build_proto_sg(op, ses);
1831                                 } else if (is_auth_only(ses)) {
1832                                         cf = build_auth_only_sg(op, ses);
1833                                 } else if (is_cipher_only(ses)) {
1834                                         cf = build_cipher_only_sg(op, ses);
1835                                 } else if (is_aead(ses)) {
1836                                         cf = build_cipher_auth_gcm_sg(op, ses);
1837                                         auth_only_len = ses->auth_only_len;
1838                                 } else if (is_auth_cipher(ses)) {
1839                                         cf = build_cipher_auth_sg(op, ses);
1840                                 } else {
1841                                         DPAA_SEC_DP_ERR("unsupported crypto operation");
1842                                         frames_to_send = loop;
1843                                         nb_ops = loop;
1844                                         goto send_pkts;
1845                                 }
1846                         }
1847                         if (unlikely(!cf)) {
1848                                 frames_to_send = loop;
1849                                 nb_ops = loop;
1850                                 goto send_pkts;
1851                         }
1852
1853                         fd = &fds[loop];
1854                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1855                         fd->opaque_addr = 0;
1856                         fd->cmd = 0;
1857                         qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1858                         fd->_format1 = qm_fd_compound;
1859                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1860                         /* auth_only_len is set to 0 in the descriptor and is
1861                          * overwritten here via fd.cmd, which updates the SEC
1862                          * DPOVRD register (bit 31 enables the override).
1863                          */
1864                         if (auth_only_len)
1865                                 fd->cmd = 0x80000000 | auth_only_len;
1866
1867                         /* For PDCP, the per-packet HFN is stored in the mbuf
1868                          * private area, after the sym_op.
1869                          */
1870                         if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
1871                                 fd->cmd = 0x80000000 |
1872                                         *((uint32_t *)((uint8_t *)op +
1873                                         ses->pdcp.hfn_ovd_offset));
1874                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
1875                                         *((uint32_t *)((uint8_t *)op +
1876                                         ses->pdcp.hfn_ovd_offset)),
1877                                         ses->pdcp.hfn_ovd,
1878                                         is_proto_pdcp(ses));
1879                         }
1880
1881                 }
1882 send_pkts:
1883                 loop = 0;
1884                 while (loop < frames_to_send) {
1885                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1886                                         frames_to_send - loop);
1887                 }
1888                 nb_ops -= frames_to_send;
1889                 num_tx += frames_to_send;
1890         }
1891
1892         dpaa_qp->tx_pkts += num_tx;
1893         dpaa_qp->tx_errs += nb_ops - num_tx;
1894
1895         return num_tx;
1896 }
1897
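/*
 * Dequeue completed frames from the queue pair's outbound FQ:
 * dpaa_sec_deq() maps each returned frame back to its originating
 * rte_crypto_op, and the RX counters are updated accordingly.
 */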
1898 static uint16_t
1899 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1900                        uint16_t nb_ops)
1901 {
1902         uint16_t num_rx;
1903         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1904
1905         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1906
1907         dpaa_qp->rx_pkts += num_rx;
1908         dpaa_qp->rx_errs += nb_ops - num_rx;
1909
1910         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1911
1912         return num_rx;
1913 }
1914
1915 /** Release queue pair */
1916 static int
1917 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1918                             uint16_t qp_id)
1919 {
1920         struct dpaa_sec_dev_private *internals;
1921         struct dpaa_sec_qp *qp = NULL;
1922
1923         PMD_INIT_FUNC_TRACE();
1924
1925         DPAA_SEC_DEBUG("dev = %p, queue = %d", dev, qp_id);
1926
1927         internals = dev->data->dev_private;
1928         if (qp_id >= internals->max_nb_queue_pairs) {
1929                 DPAA_SEC_ERR("Invalid qp_id %d, max supported is %d",
1930                              qp_id, internals->max_nb_queue_pairs);
1931                 return -EINVAL;
1932         }
1933
1934         qp = &internals->qps[qp_id];
1935         qp->internals = NULL;
1936         dev->data->queue_pairs[qp_id] = NULL;
1937
1938         return 0;
1939 }
1940
1941 /** Setup a queue pair */
1942 static int
1943 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1944                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1945                 __rte_unused int socket_id)
1946 {
1947         struct dpaa_sec_dev_private *internals;
1948         struct dpaa_sec_qp *qp = NULL;
1949
1950         DPAA_SEC_DEBUG("dev = %p, queue = %d, conf = %p", dev, qp_id, qp_conf);
1951
1952         internals = dev->data->dev_private;
1953         if (qp_id >= internals->max_nb_queue_pairs) {
1954                 DPAA_SEC_ERR("Invalid qp_id %d, max supported is %d",
1955                              qp_id, internals->max_nb_queue_pairs);
1956                 return -EINVAL;
1957         }
1958
1959         qp = &internals->qps[qp_id];
1960         qp->internals = internals;
1961         dev->data->queue_pairs[qp_id] = qp;
1962
1963         return 0;
1964 }
1965
1966 /** Return the number of allocated queue pairs */
1967 static uint32_t
1968 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1969 {
1970         PMD_INIT_FUNC_TRACE();
1971
1972         return dev->data->nb_queue_pairs;
1973 }
1974
1975 /** Returns the size of session structure */
1976 static unsigned int
1977 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1978 {
1979         PMD_INIT_FUNC_TRACE();
1980
1981         return sizeof(dpaa_sec_session);
1982 }
1983
1984 static int
1985 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1986                      struct rte_crypto_sym_xform *xform,
1987                      dpaa_sec_session *session)
1988 {
1989         session->cipher_alg = xform->cipher.algo;
1990         session->iv.length = xform->cipher.iv.length;
1991         session->iv.offset = xform->cipher.iv.offset;
1992         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1993                                                RTE_CACHE_LINE_SIZE);
1994         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1995                 DPAA_SEC_ERR("No Memory for cipher key");
1996                 return -ENOMEM;
1997         }
1998         session->cipher_key.length = xform->cipher.key.length;
1999
2000         memcpy(session->cipher_key.data, xform->cipher.key.data,
2001                xform->cipher.key.length);
2002         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2003                         DIR_ENC : DIR_DEC;
2004
2005         return 0;
2006 }
2007
2008 static int
2009 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2010                    struct rte_crypto_sym_xform *xform,
2011                    dpaa_sec_session *session)
2012 {
2013         session->auth_alg = xform->auth.algo;
2014         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2015                                              RTE_CACHE_LINE_SIZE);
2016         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2017                 DPAA_SEC_ERR("No Memory for auth key");
2018                 return -ENOMEM;
2019         }
2020         session->auth_key.length = xform->auth.key.length;
2021         session->digest_length = xform->auth.digest_length;
2022
2023         memcpy(session->auth_key.data, xform->auth.key.data,
2024                xform->auth.key.length);
2025         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2026                         DIR_ENC : DIR_DEC;
2027
2028         return 0;
2029 }
2030
2031 static int
2032 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2033                    struct rte_crypto_sym_xform *xform,
2034                    dpaa_sec_session *session)
2035 {
2036         session->aead_alg = xform->aead.algo;
2037         session->iv.length = xform->aead.iv.length;
2038         session->iv.offset = xform->aead.iv.offset;
2039         session->auth_only_len = xform->aead.aad_length;
2040         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2041                                              RTE_CACHE_LINE_SIZE);
2042         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2043                 DPAA_SEC_ERR("No Memory for aead key");
2044                 return -ENOMEM;
2045         }
2046         session->aead_key.length = xform->aead.key.length;
2047         session->digest_length = xform->aead.digest_length;
2048
2049         memcpy(session->aead_key.data, xform->aead.key.data,
2050                xform->aead.key.length);
2051         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2052                         DIR_ENC : DIR_DEC;
2053
2054         return 0;
2055 }
2056
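/*
 * Reserve a free session RX frame queue from the device-private pool of
 * max_nb_sessions * MAX_DPAA_CORES entries; returns NULL once every queue
 * is attached.
 */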
2057 static struct qman_fq *
2058 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2059 {
2060         unsigned int i;
2061
2062         for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2063                 if (qi->inq_attach[i] == 0) {
2064                         qi->inq_attach[i] = 1;
2065                         return &qi->inq[i];
2066                 }
2067         }
2068         DPAA_SEC_WARN("All sessions in use (%u)", qi->max_nb_sessions);
2069
2070         return NULL;
2071 }
2072
2073 static int
2074 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2075 {
2076         unsigned int i;
2077
2078         for (i = 0; i < qi->max_nb_sessions; i++) {
2079                 if (&qi->inq[i] == fq) {
2080                         qman_retire_fq(fq, NULL);
2081                         qman_oos_fq(fq);
2082                         qi->inq_attach[i] = 0;
2083                         return 0;
2084                 }
2085         }
2086         return -1;
2087 }
2088
2089 static int
2090 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2091 {
2092         int ret;
2093
2094         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2095         ret = dpaa_sec_prep_cdb(sess);
2096         if (ret) {
2097                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2098                 return -1;
2099         }
2100         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2101                 ret = rte_dpaa_portal_init((void *)0);
2102                 if (ret) {
2103                         DPAA_SEC_ERR("Failure in affining portal");
2104                         return ret;
2105                 }
2106         }
2107         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2108                                dpaa_mem_vtop(&sess->cdb),
2109                                qman_fq_fqid(&qp->outq));
2110         if (ret)
2111                 DPAA_SEC_ERR("Unable to init sec queue");
2112
2113         return ret;
2114 }
2115
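/*
 * Parse a symmetric xform chain into the private session: cipher-only,
 * auth-only, cipher+auth chains (in the supported order), or AEAD; then
 * reserve one RX queue per core for the session.
 */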
2116 static int
2117 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2118                             struct rte_crypto_sym_xform *xform, void *sess)
2119 {
2120         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2121         dpaa_sec_session *session = sess;
2122         uint32_t i;
2123
2124         PMD_INIT_FUNC_TRACE();
2125
2126         if (unlikely(sess == NULL)) {
2127                 DPAA_SEC_ERR("invalid session struct");
2128                 return -EINVAL;
2129         }
2130         memset(session, 0, sizeof(dpaa_sec_session));
2131
2132         /* Default IV length = 0 */
2133         session->iv.length = 0;
2134
2135         /* Cipher Only */
2136         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2137                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2138                 dpaa_sec_cipher_init(dev, xform, session);
2139
2140         /* Authentication Only */
2141         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2142                    xform->next == NULL) {
2143                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2144                 dpaa_sec_auth_init(dev, xform, session);
2145
2146         /* Cipher then Authenticate */
2147         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2148                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2149                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2150                         dpaa_sec_cipher_init(dev, xform, session);
2151                         dpaa_sec_auth_init(dev, xform->next, session);
2152                 } else {
2153                         DPAA_SEC_ERR("Not supported: Cipher (decrypt) then Auth");
2154                         return -EINVAL;
2155                 }
2156
2157         /* Authenticate then Cipher */
2158         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2159                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2160                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2161                         dpaa_sec_auth_init(dev, xform, session);
2162                         dpaa_sec_cipher_init(dev, xform->next, session);
2163                 } else {
2164                         DPAA_SEC_ERR("Not supported: Auth then Cipher (encrypt)");
2165                         return -EINVAL;
2166                 }
2167
2168         /* AEAD operation for AES-GCM kind of Algorithms */
2169         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2170                    xform->next == NULL) {
2171                 dpaa_sec_aead_init(dev, xform, session);
2172
2173         } else {
2174                 DPAA_SEC_ERR("Invalid crypto type");
2175                 return -EINVAL;
2176         }
2177         session->ctx_pool = internals->ctx_pool;
2178         rte_spinlock_lock(&internals->lock);
2179         for (i = 0; i < MAX_DPAA_CORES; i++) {
2180                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2181                 if (session->inq[i] == NULL) {
2182                         DPAA_SEC_ERR("unable to attach sec queue");
2183                         rte_spinlock_unlock(&internals->lock);
2184                         goto err1;
2185                 }
2186         }
2187         rte_spinlock_unlock(&internals->lock);
2188
2189         return 0;
2190
2191 err1:
2192         rte_free(session->cipher_key.data);
2193         rte_free(session->auth_key.data);
2194         memset(session, 0, sizeof(dpaa_sec_session));
2195
2196         return -EINVAL;
2197 }
2198
2199 static int
2200 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2201                 struct rte_crypto_sym_xform *xform,
2202                 struct rte_cryptodev_sym_session *sess,
2203                 struct rte_mempool *mempool)
2204 {
2205         void *sess_private_data;
2206         int ret;
2207
2208         PMD_INIT_FUNC_TRACE();
2209
2210         if (rte_mempool_get(mempool, &sess_private_data)) {
2211                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2212                 return -ENOMEM;
2213         }
2214
2215         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2216         if (ret != 0) {
2217                 DPAA_SEC_ERR("failed to configure session parameters");
2218
2219                 /* Return session to mempool */
2220                 rte_mempool_put(mempool, sess_private_data);
2221                 return ret;
2222         }
2223
2224         set_sym_session_private_data(sess, dev->driver_id,
2225                         sess_private_data);
2226
2227
2228         return 0;
2229 }
2230
2231 static inline void
2232 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2233 {
2234         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2235         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2236         uint8_t i;
2237
2238         for (i = 0; i < MAX_DPAA_CORES; i++) {
2239                 if (s->inq[i])
2240                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2241                 s->inq[i] = NULL;
2242                 s->qp[i] = NULL;
2243         }
2244         rte_free(s->cipher_key.data);
2245         rte_free(s->auth_key.data);
2246         memset(s, 0, sizeof(dpaa_sec_session));
2247         rte_mempool_put(sess_mp, (void *)s);
2248 }
2249
2250 /** Clear the memory of session so it doesn't leave key material behind */
2251 static void
2252 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2253                 struct rte_cryptodev_sym_session *sess)
2254 {
2255         PMD_INIT_FUNC_TRACE();
2256         uint8_t index = dev->driver_id;
2257         void *sess_priv = get_sym_session_private_data(sess, index);
2258         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2259
2260         if (sess_priv) {
2261                 free_session_memory(dev, s);
2262                 set_sym_session_private_data(sess, index, NULL);
2263         }
2264 }
2265
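/*
 * Build a lookaside IPsec session: copy the cipher/auth keys, then fill
 * the SEC encap PDB (including a prebuilt tunnel IP header) for egress,
 * or the decap PDB for ingress.
 */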
2266 static int
2267 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2268                            struct rte_security_session_conf *conf,
2269                            void *sess)
2270 {
2271         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2272         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2273         struct rte_crypto_auth_xform *auth_xform = NULL;
2274         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2275         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2276         uint32_t i;
2277
2278         PMD_INIT_FUNC_TRACE();
2279
2280         memset(session, 0, sizeof(dpaa_sec_session));
2281         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2282                 cipher_xform = &conf->crypto_xform->cipher;
2283                 if (conf->crypto_xform->next)
2284                         auth_xform = &conf->crypto_xform->next->auth;
2285         } else {
2286                 auth_xform = &conf->crypto_xform->auth;
2287                 if (conf->crypto_xform->next)
2288                         cipher_xform = &conf->crypto_xform->next->cipher;
2289         }
2290         session->proto_alg = conf->protocol;
2291
2292         if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2293                 session->cipher_key.data = rte_zmalloc(NULL,
2294                                                        cipher_xform->key.length,
2295                                                        RTE_CACHE_LINE_SIZE);
2296                 if (session->cipher_key.data == NULL &&
2297                                 cipher_xform->key.length > 0) {
2298                         DPAA_SEC_ERR("No Memory for cipher key");
2299                         return -ENOMEM;
2300                 }
2301                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2302                                 cipher_xform->key.length);
2303                 session->cipher_key.length = cipher_xform->key.length;
2304
2305                 switch (cipher_xform->algo) {
2306                 case RTE_CRYPTO_CIPHER_AES_CBC:
2307                 case RTE_CRYPTO_CIPHER_3DES_CBC:
2308                 case RTE_CRYPTO_CIPHER_AES_CTR:
2309                         break;
2310                 default:
2311                         DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2312                                 cipher_xform->algo);
2313                         goto out;
2314                 }
2315                 session->cipher_alg = cipher_xform->algo;
2316         } else {
2317                 session->cipher_key.data = NULL;
2318                 session->cipher_key.length = 0;
2319                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2320         }
2321
2322         if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2323                 session->auth_key.data = rte_zmalloc(NULL,
2324                                                 auth_xform->key.length,
2325                                                 RTE_CACHE_LINE_SIZE);
2326                 if (session->auth_key.data == NULL &&
2327                                 auth_xform->key.length > 0) {
2328                         DPAA_SEC_ERR("No Memory for auth key");
2329                         rte_free(session->cipher_key.data);
2330                         return -ENOMEM;
2331                 }
2332                 memcpy(session->auth_key.data, auth_xform->key.data,
2333                                 auth_xform->key.length);
2334                 session->auth_key.length = auth_xform->key.length;
2335
2336                 switch (auth_xform->algo) {
2337                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2338                 case RTE_CRYPTO_AUTH_MD5_HMAC:
2339                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2340                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2341                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2342                 case RTE_CRYPTO_AUTH_AES_CMAC:
2343                         break;
2344                 default:
2345                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2346                                 auth_xform->algo);
2347                         goto out;
2348                 }
2349                 session->auth_alg = auth_xform->algo;
2350         } else {
2351                 session->auth_key.data = NULL;
2352                 session->auth_key.length = 0;
2353                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2354         }
2355
2356         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2357                 if (ipsec_xform->tunnel.type ==
2358                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2359                         memset(&session->encap_pdb, 0,
2360                                 sizeof(struct ipsec_encap_pdb) +
2361                                 sizeof(session->ip4_hdr));
2362                         session->ip4_hdr.ip_v = IPVERSION;
2363                         session->ip4_hdr.ip_hl = 5;
2364                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2365                                                 sizeof(session->ip4_hdr));
2366                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2367                         session->ip4_hdr.ip_id = 0;
2368                         session->ip4_hdr.ip_off = 0;
2369                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2370                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2371                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2372                                         IPPROTO_ESP : IPPROTO_AH;
2373                         session->ip4_hdr.ip_sum = 0;
2374                         session->ip4_hdr.ip_src =
2375                                         ipsec_xform->tunnel.ipv4.src_ip;
2376                         session->ip4_hdr.ip_dst =
2377                                         ipsec_xform->tunnel.ipv4.dst_ip;
2378                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2379                                                 (void *)&session->ip4_hdr,
2380                                                 sizeof(struct ip));
2381                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2382                 } else if (ipsec_xform->tunnel.type ==
2383                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2384                         memset(&session->encap_pdb, 0,
2385                                 sizeof(struct ipsec_encap_pdb) +
2386                                 sizeof(session->ip6_hdr));
2387                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2388                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2389                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2390                                         RTE_IPV6_HDR_TC_SHIFT) &
2391                                         RTE_IPV6_HDR_TC_MASK) |
2392                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2393                                         RTE_IPV6_HDR_FL_SHIFT) &
2394                                         RTE_IPV6_HDR_FL_MASK));
2395                         /* Payload length will be updated by HW */
2396                         session->ip6_hdr.payload_len = 0;
2397                         session->ip6_hdr.hop_limits =
2398                                         ipsec_xform->tunnel.ipv6.hlimit;
2399                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2400                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2401                                         IPPROTO_ESP : IPPROTO_AH;
2402                         memcpy(&session->ip6_hdr.src_addr,
2403                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2404                         memcpy(&session->ip6_hdr.dst_addr,
2405                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2406                         session->encap_pdb.ip_hdr_len =
2407                                                 sizeof(struct rte_ipv6_hdr);
2408                 }
2409                 session->encap_pdb.options =
2410                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2411                         PDBOPTS_ESP_OIHI_PDB_INL |
2412                         PDBOPTS_ESP_IVSRC |
2413                         PDBHMO_ESP_ENCAP_DTTL |
2414                         PDBHMO_ESP_SNR;
2415                 if (ipsec_xform->options.esn)
2416                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2417                 session->encap_pdb.spi = ipsec_xform->spi;
2418                 session->dir = DIR_ENC;
2419         } else if (ipsec_xform->direction ==
2420                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2421                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2422                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2423                         session->decap_pdb.options = sizeof(struct ip) << 16;
2424                 else
2425                         session->decap_pdb.options =
2426                                         sizeof(struct rte_ipv6_hdr) << 16;
2427                 if (ipsec_xform->options.esn)
2428                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2429                 session->dir = DIR_DEC;
2430         } else
2431                 goto out;
2432         session->ctx_pool = internals->ctx_pool;
2433         rte_spinlock_lock(&internals->lock);
2434         for (i = 0; i < MAX_DPAA_CORES; i++) {
2435                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2436                 if (session->inq[i] == NULL) {
2437                         DPAA_SEC_ERR("unable to attach sec queue");
2438                         rte_spinlock_unlock(&internals->lock);
2439                         goto out;
2440                 }
2441         }
2442         rte_spinlock_unlock(&internals->lock);
2443
2444         return 0;
2445 out:
2446         rte_free(session->auth_key.data);
2447         rte_free(session->cipher_key.data);
2448         memset(session, 0, sizeof(dpaa_sec_session));
2449         return -1;
2450 }
2451
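/*
 * Build a PDCP session: copy the cipher/auth keys and record the PDCP
 * parameters (domain, bearer, packet direction, SN size, HFN and HFN
 * override) consumed later when the shared descriptor is prepared.
 */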
2452 static int
2453 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2454                           struct rte_security_session_conf *conf,
2455                           void *sess)
2456 {
2457         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2458         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2459         struct rte_crypto_auth_xform *auth_xform = NULL;
2460         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2461         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2462         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2463         uint32_t i;
2464
2465         PMD_INIT_FUNC_TRACE();
2466
2467         memset(session, 0, sizeof(dpaa_sec_session));
2468
2469         /* find xfrm types */
2470         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2471                 cipher_xform = &xform->cipher;
2472                 if (xform->next != NULL)
2473                         auth_xform = &xform->next->auth;
2474         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2475                 auth_xform = &xform->auth;
2476                 if (xform->next != NULL)
2477                         cipher_xform = &xform->next->cipher;
2478         } else {
2479                 DPAA_SEC_ERR("Invalid crypto type");
2480                 return -EINVAL;
2481         }
2482
2483         session->proto_alg = conf->protocol;
2484         if (cipher_xform) {
2485                 session->cipher_key.data = rte_zmalloc(NULL,
2486                                                cipher_xform->key.length,
2487                                                RTE_CACHE_LINE_SIZE);
2488                 if (session->cipher_key.data == NULL &&
2489                                 cipher_xform->key.length > 0) {
2490                         DPAA_SEC_ERR("No Memory for cipher key");
2491                         return -ENOMEM;
2492                 }
2493                 session->cipher_key.length = cipher_xform->key.length;
2494                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2495                         cipher_xform->key.length);
2496                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2497                                         DIR_ENC : DIR_DEC;
2498                 session->cipher_alg = cipher_xform->algo;
2499         } else {
2500                 session->cipher_key.data = NULL;
2501                 session->cipher_key.length = 0;
2502                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2503                 session->dir = DIR_ENC;
2504         }
2505
2506         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2507                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2508                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2509                         DPAA_SEC_ERR(
2510                                 "PDCP Seq Num size should be 5/12 bits for cmode");
2511                         goto out;
2512                 }
2513         }
2514
2515         if (auth_xform) {
2516                 session->auth_key.data = rte_zmalloc(NULL,
2517                                                      auth_xform->key.length,
2518                                                      RTE_CACHE_LINE_SIZE);
2519                 if (!session->auth_key.data &&
2520                     auth_xform->key.length > 0) {
2521                         DPAA_SEC_ERR("No Memory for auth key");
2522                         rte_free(session->cipher_key.data);
2523                         return -ENOMEM;
2524                 }
2525                 session->auth_key.length = auth_xform->key.length;
2526                 memcpy(session->auth_key.data, auth_xform->key.data,
2527                        auth_xform->key.length);
2528                 session->auth_alg = auth_xform->algo;
2529         } else {
2530                 session->auth_key.data = NULL;
2531                 session->auth_key.length = 0;
2532                 session->auth_alg = 0;
2533         }
2534         session->pdcp.domain = pdcp_xform->domain;
2535         session->pdcp.bearer = pdcp_xform->bearer;
2536         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2537         session->pdcp.sn_size = pdcp_xform->sn_size;
2538         session->pdcp.hfn = pdcp_xform->hfn;
2539         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2540         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2541         if (cipher_xform)
2542                 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2542
2543         session->ctx_pool = dev_priv->ctx_pool;
2544         rte_spinlock_lock(&dev_priv->lock);
2545         for (i = 0; i < MAX_DPAA_CORES; i++) {
2546                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2547                 if (session->inq[i] == NULL) {
2548                         DPAA_SEC_ERR("unable to attach sec queue");
2549                         rte_spinlock_unlock(&dev_priv->lock);
2550                         goto out;
2551                 }
2552         }
2553         rte_spinlock_unlock(&dev_priv->lock);
2554         return 0;
2555 out:
2556         rte_free(session->auth_key.data);
2557         rte_free(session->cipher_key.data);
2558         memset(session, 0, sizeof(dpaa_sec_session));
2559         return -1;
2560 }
2561
2562 static int
2563 dpaa_sec_security_session_create(void *dev,
2564                                  struct rte_security_session_conf *conf,
2565                                  struct rte_security_session *sess,
2566                                  struct rte_mempool *mempool)
2567 {
2568         void *sess_private_data;
2569         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2570         int ret;
2571
2572         if (rte_mempool_get(mempool, &sess_private_data)) {
2573                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2574                 return -ENOMEM;
2575         }
2576
2577         switch (conf->protocol) {
2578         case RTE_SECURITY_PROTOCOL_IPSEC:
2579                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2580                                 sess_private_data);
2581                 break;
2582         case RTE_SECURITY_PROTOCOL_PDCP:
2583                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
2584                                 sess_private_data);
2585                 break;
2586         case RTE_SECURITY_PROTOCOL_MACSEC:
2587                 return -ENOTSUP;
2588         default:
2589                 return -EINVAL;
2590         }
2591         if (ret != 0) {
2592                 DPAA_SEC_ERR("failed to configure session parameters");
2593                 /* Return session to mempool */
2594                 rte_mempool_put(mempool, sess_private_data);
2595                 return ret;
2596         }
2597
2598         set_sec_session_private_data(sess, sess_private_data);
2599
2600         return ret;
2601 }
2602
2603 /** Clear the memory of session so it doesn't leave key material behind */
2604 static int
2605 dpaa_sec_security_session_destroy(void *dev,
2606                 struct rte_security_session *sess)
2607 {
2608         PMD_INIT_FUNC_TRACE();
2609         void *sess_priv = get_sec_session_private_data(sess);
2610         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2611
2612         if (sess_priv) {
2613                 free_session_memory((struct rte_cryptodev *)dev, s);
2614                 set_sec_session_private_data(sess, NULL);
2615         }
2616         return 0;
2617 }
2618
2619 static int
2620 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2621                        struct rte_cryptodev_config *config __rte_unused)
2622 {
2623
2624         char str[20];
2625         struct dpaa_sec_dev_private *internals;
2626
2627         PMD_INIT_FUNC_TRACE();
2628
2629         internals = dev->data->dev_private;
2630         snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2631         if (!internals->ctx_pool) {
2632                 internals->ctx_pool = rte_mempool_create((const char *)str,
2633                                                         CTX_POOL_NUM_BUFS,
2634                                                         CTX_POOL_BUF_SIZE,
2635                                                         CTX_POOL_CACHE_SIZE, 0,
2636                                                         NULL, NULL, NULL, NULL,
2637                                                         SOCKET_ID_ANY, 0);
2638                 if (!internals->ctx_pool) {
2639                         DPAA_SEC_ERR("%s create failed", str);
2640                         return -ENOMEM;
2641                 }
2642         } else
2643                 DPAA_SEC_INFO("mempool already created for dev_id : %d",
2644                                 dev->data->dev_id);
2645
2646         return 0;
2647 }
2648
2649 static int
2650 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2651 {
2652         PMD_INIT_FUNC_TRACE();
2653         return 0;
2654 }
2655
2656 static void
2657 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2658 {
2659         PMD_INIT_FUNC_TRACE();
2660 }
2661
2662 static int
2663 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2664 {
2665         struct dpaa_sec_dev_private *internals;
2666
2667         PMD_INIT_FUNC_TRACE();
2668
2669         if (dev == NULL)
2670                 return -EINVAL;
2671
2672         internals = dev->data->dev_private;
2673         rte_mempool_free(internals->ctx_pool);
2674         internals->ctx_pool = NULL;
2675
2676         return 0;
2677 }
2678
2679 static void
2680 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2681                        struct rte_cryptodev_info *info)
2682 {
2683         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2684
2685         PMD_INIT_FUNC_TRACE();
2686         if (info != NULL) {
2687                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2688                 info->feature_flags = dev->feature_flags;
2689                 info->capabilities = dpaa_sec_capabilities;
2690                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2691                 info->driver_id = cryptodev_driver_id;
2692         }
2693 }
2694
2695 static struct rte_cryptodev_ops crypto_ops = {
2696         .dev_configure        = dpaa_sec_dev_configure,
2697         .dev_start            = dpaa_sec_dev_start,
2698         .dev_stop             = dpaa_sec_dev_stop,
2699         .dev_close            = dpaa_sec_dev_close,
2700         .dev_infos_get        = dpaa_sec_dev_infos_get,
2701         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
2702         .queue_pair_release   = dpaa_sec_queue_pair_release,
2703         .queue_pair_count     = dpaa_sec_queue_pair_count,
2704         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
2705         .sym_session_configure    = dpaa_sec_sym_session_configure,
2706         .sym_session_clear        = dpaa_sec_sym_session_clear
2707 };
2708
2709 static const struct rte_security_capability *
2710 dpaa_sec_capabilities_get(void *device __rte_unused)
2711 {
2712         return dpaa_sec_security_cap;
2713 }
2714
2715 static const struct rte_security_ops dpaa_sec_security_ops = {
2716         .session_create = dpaa_sec_security_session_create,
2717         .session_update = NULL,
2718         .session_stats_get = NULL,
2719         .session_destroy = dpaa_sec_security_session_destroy,
2720         .set_pkt_metadata = NULL,
2721         .capabilities_get = dpaa_sec_capabilities_get
2722 };
2723
2724 static int
2725 dpaa_sec_uninit(struct rte_cryptodev *dev)
2726 {
2727         struct dpaa_sec_dev_private *internals;
2728
2729         if (dev == NULL)
2730                 return -ENODEV;
2731
2732         internals = dev->data->dev_private;
2733         rte_free(dev->security_ctx);
2734
2735         /* In case close has been called, internals->ctx_pool would be NULL */
2736         rte_mempool_free(internals->ctx_pool);
2737         rte_free(internals);
2738
2739         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2740                       dev->data->name, rte_socket_id());
2741
2742         return 0;
2743 }
2744
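/*
 * One-time device initialization: register burst ops and feature flags,
 * create the rte_security context and, in the primary process only, init
 * one TX FQ per queue pair plus the pool of per-session RX FQs.
 */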
2745 static int
2746 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2747 {
2748         struct dpaa_sec_dev_private *internals;
2749         struct rte_security_ctx *security_instance;
2750         struct dpaa_sec_qp *qp;
2751         uint32_t i, flags;
2752         int ret;
2753
2754         PMD_INIT_FUNC_TRACE();
2755
2756         cryptodev->driver_id = cryptodev_driver_id;
2757         cryptodev->dev_ops = &crypto_ops;
2758
2759         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2760         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2761         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2762                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2763                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2764                         RTE_CRYPTODEV_FF_SECURITY |
2765                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2766                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2767                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2768                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2769                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2770
2771         internals = cryptodev->data->dev_private;
2772         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2773         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2774
2775         /*
2776          * For secondary processes, we don't initialise any further as the
2777          * primary has already done this work; nothing else is needed here.
2778          */
2780         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2781                 DPAA_SEC_WARN("Device already init by primary process");
2782                 return 0;
2783         }
2784
2785         /* Initialize security_ctx only for primary process*/
2786         security_instance = rte_malloc("rte_security_instances_ops",
2787                                 sizeof(struct rte_security_ctx), 0);
2788         if (security_instance == NULL)
2789                 return -ENOMEM;
2790         security_instance->device = (void *)cryptodev;
2791         security_instance->ops = &dpaa_sec_security_ops;
2792         security_instance->sess_cnt = 0;
2793         cryptodev->security_ctx = security_instance;
2794
2795         rte_spinlock_init(&internals->lock);
2796         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2797                 /* init qman fq for queue pair */
2798                 qp = &internals->qps[i];
2799                 ret = dpaa_sec_init_tx(&qp->outq);
2800                 if (ret) {
2801                         DPAA_SEC_ERR("config tx of queue pair %d failed", i);
2802                         goto init_error;
2803                 }
2804         }
2805
2806         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2807                 QMAN_FQ_FLAG_TO_DCPORTAL;
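        /*
         * Rx FQs are dequeued by the SEC block, i.e. a direct-connect portal
         * (QMAN_FQ_FLAG_TO_DCPORTAL), and their FQIDs are allocated by QMan
         * (QMAN_FQ_FLAG_DYNAMIC_FQID - hence the fqid argument of 0 below).
         * One FQ is pre-created per potential session on each core; a session
         * takes one over when it is attached to a queue pair.
         */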
2808         for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
2809                 /* create rx qman fq for sessions */
2810                 ret = qman_create_fq(0, flags, &internals->inq[i]);
2811                 if (unlikely(ret != 0)) {
2812                         DPAA_SEC_ERR("sec qman_create_fq failed");
2813                         goto init_error;
2814                 }
2815         }
2816
2817         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2818         return 0;
2819
2820 init_error:
2821         DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
2822
2823         dpaa_sec_uninit(cryptodev);
2824         return -EFAULT;
2825 }
2826
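/*
 * DPAA bus probe callback: allocate a cryptodev named "dpaa_sec-<dev_id>",
 * allocate its private data in the primary process, determine the SEC era
 * from the device tree if it is not set yet, and run the device init.
 * On failure, everything allocated here is released again.
 */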
2827 static int
2828 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
2829                                 struct rte_dpaa_device *dpaa_dev)
2830 {
2831         struct rte_cryptodev *cryptodev;
2832         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2834         int retval;
2835
2836         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
2837                         dpaa_dev->id.dev_id);
2838
2839         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2840         if (cryptodev == NULL)
2841                 return -ENOMEM;
2842
2843         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2844                 cryptodev->data->dev_private = rte_zmalloc_socket(
2845                                         "cryptodev private structure",
2846                                         sizeof(struct dpaa_sec_dev_private),
2847                                         RTE_CACHE_LINE_SIZE,
2848                                         rte_socket_id());
2849
2850                 if (cryptodev->data->dev_private == NULL)
2851                         rte_panic("Cannot allocate memory for private "
2852                                         "device data");
2853         }
2854
2855         dpaa_dev->crypto_dev = cryptodev;
2856         cryptodev->device = &dpaa_dev->device;
2857
2858         /* init user callbacks */
2859         TAILQ_INIT(&(cryptodev->link_intr_cbs));
2860
2861         /* if the SEC era is not configured, read it from the device tree */
2862         if (!rta_get_sec_era()) {
2863                 const struct device_node *caam_node;
2864
2865                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2866                         const uint32_t *prop = of_get_property(caam_node,
2867                                         "fsl,sec-era",
2868                                         NULL);
2869                         if (prop) {
2870                                 rta_set_sec_era(
2871                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2872                                 break;
2873                         }
2874                 }
2875         }
2876
2877         /* Invoke PMD device initialization function */
2878         retval = dpaa_sec_dev_init(cryptodev);
2879         if (retval == 0)
2880                 return 0;
2881
2882         /* Initialization failed: undo the allocations made above */
2883         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2884                 rte_free(cryptodev->data->dev_private);
2885
2886         rte_cryptodev_pmd_release_device(cryptodev);
2887
2888         return -ENXIO;
2889 }
2890
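/* DPAA bus remove callback: uninitialize and destroy the cryptodev */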
2891 static int
2892 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2893 {
2894         struct rte_cryptodev *cryptodev;
2895         int ret;
2896
2897         cryptodev = dpaa_dev->crypto_dev;
2898         if (cryptodev == NULL)
2899                 return -ENODEV;
2900
2901         ret = dpaa_sec_uninit(cryptodev);
2902         if (ret)
2903                 return ret;
2904
2905         return rte_cryptodev_pmd_destroy(cryptodev);
2906 }
2907
2908 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2909         .drv_type = FSL_DPAA_CRYPTO,
2910         .driver = {
2911                 .name = "DPAA SEC PMD"
2912         },
2913         .probe = cryptodev_dpaa_sec_probe,
2914         .remove = cryptodev_dpaa_sec_remove,
2915 };
2916
2917 static struct cryptodev_driver dpaa_sec_crypto_drv;
2918
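/*
 * Register the PMD with the DPAA bus and with the crypto framework; the
 * latter assigns the driver id stored in cryptodev_driver_id.
 */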
2919 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2920 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2921                 cryptodev_driver_id);
2922
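/* Constructor: register the "pmd.crypto.dpaa" log type, defaulting to NOTICE */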
2923 RTE_INIT(dpaa_sec_init_log)
2924 {
2925         dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2926         if (dpaa_logtype_sec >= 0)
2927                 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2928 }