/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2018 NXP
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <rte_dpaa_bus.h>
#include <dpaa_sec_log.h>
enum rta_sec_era rta_sec_era;
static uint8_t cryptodev_driver_id;
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
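/*
 * Per-lcore staging for dequeued crypto ops: dqrr_out_fq_cb_rx() below
 * fills dpaa_sec_ops[] and defers further dequeues once DPAA_SEC_BURST
 * entries have been collected.
 */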
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
if (!ctx->fd_status) {
ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* report op status to sym->op and then free the ctx memory */
rte_mempool_put(ctx->ctx_pool, (void *)ctx);
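/*
 * Allocate a per-operation context (job descriptor + SG table) from the
 * session's ctx mempool; the SG table is zeroed cache line by cache line
 * below, which is cheaper than a memset() on this per-packet path.
 */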
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
struct dpaa_sec_op_ctx *ctx;
retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
 * Clear SG memory. There are 16 SG entries of 16 bytes each.
 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
 * each packet, so memset() would be costlier than dcbz_64().
dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
ctx->ctx_pool = ses->ctx_pool;
ctx->vtop_offset = (size_t) ctx
- rte_mempool_virt2iova(ctx);
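/*
 * Address translation helpers. dpaa_mem_vtop() resolves a virtual
 * address to an IOVA via its memseg; dpaa_mem_ptov() is the inverse.
 * The vtop_offset cached above presumably lets hot-path code translate
 * addresses within the ctx by plain subtraction instead of a lookup.
 */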
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
const struct rte_memseg *ms;
ms = rte_mem_virt2memseg(vaddr, NULL);
return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
dpaa_mem_ptov(rte_iova_t paddr)
return rte_mem_iova2virt(paddr);
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
const struct qm_mr_entry *msg)
DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
fq->fqid, msg->ern.rc, msg->ern.seqnum);
/* Initialize the queue with dest chan as the CAAM chan so that
 * all the packets in this queue are dispatched into CAAM.
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
struct qm_mcc_initfq fq_opts;
/* Clear FQ options */
memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
flags = QMAN_INITFQ_FLAG_SCHED;
fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
QM_INITFQ_WE_CONTEXTB;
qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
fq_opts.fqd.context_b = fqid_out;
fq_opts.fqd.dest.channel = qm_channel_caam;
fq_opts.fqd.dest.wq = 0;
fq_in->cb.ern = ern_sec_fq_handler;
DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
ret = qman_init_fq(fq_in, flags, &fq_opts);
if (unlikely(ret != 0))
DPAA_SEC_ERR("qman_init_fq failed %d", ret);
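/*
 * Resulting ingress FQ wiring (sketch of the fields set above):
 * context_a = shared descriptor address, context_b = output FQID,
 * destination = CAAM channel/wq 0, so frames enqueued on fq_in are
 * processed by SEC and the results delivered on fqid_out.
 */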
/* something is put into in_fq and CAAM puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
struct qman_fq *fq __always_unused,
const struct qm_dqrr_entry *dqrr)
const struct qm_fd *fd;
struct dpaa_sec_job *job;
struct dpaa_sec_op_ctx *ctx;
if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
return qman_cb_dqrr_defer;
if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
return qman_cb_dqrr_consume;
/* sg is embedded in an op ctx,
 * sg[0] is for output
job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
struct qm_sg_entry *sg_out;
sg_out = &job->sg[0];
hw_sg_to_cpu(sg_out);
len = sg_out->length;
ctx->op->sym->m_src->pkt_len = len;
ctx->op->sym->m_src->data_len = len;
dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
dpaa_sec_op_ending(ctx);
return qman_cb_dqrr_consume;
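/*
 * Returning qman_cb_dqrr_defer once DPAA_SEC_BURST ops are staged stops
 * consuming further DQRR entries in this poll, bounding the burst that
 * is handed back to the application in dpaa_sec_ops[].
 */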
/* the CAAM result is put into this queue */
dpaa_sec_init_tx(struct qman_fq *fq)
struct qm_mcc_initfq opts;
flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
QMAN_FQ_FLAG_DYNAMIC_FQID;
ret = qman_create_fq(0, flags, fq);
DPAA_SEC_ERR("qman_create_fq failed");
memset(&opts, 0, sizeof(opts));
opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
fq->cb.dqrr = dqrr_out_fq_cb_rx;
fq->cb.ern = ern_sec_fq_handler;
ret = qman_init_fq(fq, 0, &opts);
DPAA_SEC_ERR("unable to init caam source fq!");
static inline int is_cipher_only(dpaa_sec_session *ses)
return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
static inline int is_auth_only(dpaa_sec_session *ses)
return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
static inline int is_aead(dpaa_sec_session *ses)
return ((ses->cipher_alg == 0) &&
(ses->auth_alg == 0) &&
(ses->aead_alg != 0));
static inline int is_auth_cipher(dpaa_sec_session *ses)
return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
static inline int is_proto_ipsec(dpaa_sec_session *ses)
return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
static inline int is_encode(dpaa_sec_session *ses)
return ses->dir == DIR_ENC;
static inline int is_decode(dpaa_sec_session *ses)
return ses->dir == DIR_DEC;
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
switch (ses->auth_alg) {
case RTE_CRYPTO_AUTH_NULL:
ses->digest_length = 0;
case RTE_CRYPTO_AUTH_MD5_HMAC:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
switch (ses->cipher_alg) {
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_AES_CBC:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
alginfo_c->algmode = OP_ALG_AAI_CBC;
case RTE_CRYPTO_CIPHER_3DES_CBC:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
alginfo_c->algmode = OP_ALG_AAI_CBC;
case RTE_CRYPTO_CIPHER_AES_CTR:
(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
alginfo_c->algmode = OP_ALG_AAI_CTR;
DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
switch (ses->aead_alg) {
case RTE_CRYPTO_AEAD_AES_GCM:
alginfo->algtype = OP_ALG_ALGSEL_AES;
alginfo->algmode = OP_ALG_AAI_GCM;
DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
/* prepare command block of the session */
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
int32_t shared_desc_len = 0;
struct sec_cdb *cdb = &ses->cdb;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
memset(cdb, 0, sizeof(struct sec_cdb));
if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
DPAA_SEC_ERR("unsupported cipher alg");
alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
shared_desc_len = cnstr_shdsc_blkcipher(
} else if (is_auth_only(ses)) {
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
DPAA_SEC_ERR("unsupported auth alg");
alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
} else if (is_aead(ses)) {
caam_aead_alg(ses, &alginfo);
if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
DPAA_SEC_ERR("unsupported aead alg");
alginfo.key = (size_t)ses->aead_key.data;
alginfo.keylen = ses->aead_key.length;
alginfo.key_enc_flags = 0;
alginfo.key_type = RTA_DATA_IMM;
if (ses->dir == DIR_ENC)
shared_desc_len = cnstr_shdsc_gcm_encap(
cdb->sh_desc, true, swap,
shared_desc_len = cnstr_shdsc_gcm_decap(
cdb->sh_desc, true, swap,
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
DPAA_SEC_ERR("unsupported cipher alg");
alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
DPAA_SEC_ERR("unsupported auth alg");
alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
cdb->sh_desc[0] = alginfo_c.keylen;
cdb->sh_desc[1] = alginfo_a.keylen;
err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
(unsigned int *)cdb->sh_desc,
&cdb->sh_desc[2], 2);
DPAA_SEC_ERR("Crypto: Incorrect key lengths");
if (cdb->sh_desc[2] & 1)
alginfo_c.key_type = RTA_DATA_IMM;
alginfo_c.key = (size_t)dpaa_mem_vtop(
(void *)(size_t)alginfo_c.key);
alginfo_c.key_type = RTA_DATA_PTR;
if (cdb->sh_desc[2] & (1<<1))
alginfo_a.key_type = RTA_DATA_IMM;
alginfo_a.key = (size_t)dpaa_mem_vtop(
(void *)(size_t)alginfo_a.key);
alginfo_a.key_type = RTA_DATA_PTR;
if (is_proto_ipsec(ses)) {
if (ses->dir == DIR_ENC) {
shared_desc_len = cnstr_shdsc_ipsec_new_encap(
true, swap, &ses->encap_pdb,
(uint8_t *)&ses->ip4_hdr,
&alginfo_c, &alginfo_a);
} else if (ses->dir == DIR_DEC) {
shared_desc_len = cnstr_shdsc_ipsec_new_decap(
true, swap, &ses->decap_pdb,
&alginfo_c, &alginfo_a);
/* Auth_only_len is set as 0 here and it will be
 * overwritten in the fd for each packet.
shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
true, swap, &alginfo_c, &alginfo_a,
ses->digest_length, ses->dir);
if (shared_desc_len < 0) {
DPAA_SEC_ERR("error in preparing command block");
return shared_desc_len;
cdb->sh_hdr.hi.field.idlen = shared_desc_len;
cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
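/*
 * Resulting CDB layout (sketch): cdb->sh_desc[] holds the shared
 * descriptor built by the cnstr_shdsc_* helpers and cdb->sh_hdr its
 * header, with idlen set to the descriptor length and both header words
 * stored big-endian as expected by SEC.
 */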
/* qp is lockless; it should be accessed by only one thread */
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
unsigned int pkts = 0;
int num_rx_bufs, ret;
struct qm_dqrr_entry *dq;
uint32_t vdqcr_flags = 0;
 * For requests of up to four buffers, we provide the exact number of
 * buffers and set the QM_VDQCR_EXACT flag.
 * Otherwise we do not set the QM_VDQCR_EXACT flag; without it the
 * hardware can provide two more buffers than requested, so we request
 * two fewer in that case.
vdqcr_flags = QM_VDQCR_EXACT;
num_rx_bufs = nb_ops;
num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
const struct qm_fd *fd;
struct dpaa_sec_job *job;
struct dpaa_sec_op_ctx *ctx;
struct rte_crypto_op *op;
dq = qman_dequeue(fq);
/* sg is embedded in an op ctx,
 * sg[0] is for output
job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
struct qm_sg_entry *sg_out;
sg_out = &job->sg[0];
hw_sg_to_cpu(sg_out);
len = sg_out->length;
op->sym->m_src->pkt_len = len;
op->sym->m_src->data_len = len;
if (!ctx->fd_status) {
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* report op status to sym->op and then free the ctx memory */
rte_mempool_put(ctx->ctx_pool, (void *)ctx);
qman_dqrr_consume(fq, dq);
} while (fq->flags & QMAN_FQ_STATE_VDQCR);
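/*
 * Note: the volatile dequeue (VDQCR) issued above completes
 * asynchronously; the loop keeps draining until QMAN_FQ_STATE_VDQCR
 * clears, so one call returns at most num_rx_bufs completed ops.
 */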
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct rte_mbuf *mbuf = sym->m_src;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg, *out_sg, *in_sg;
phys_addr_t start_addr;
uint8_t *old_digest, extra_segs;
if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
ctx = dpaa_sec_alloc_ctx(ses);
old_digest = ctx->digest;
qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
out_sg->length = ses->digest_length;
cpu_to_hw_sg(out_sg);
/* need to extend the input to a compound frame */
in_sg->extension = 1;
in_sg->length = sym->auth.data.length;
qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len - sym->auth.data.offset;
sg->offset = sym->auth.data.offset;
/* Successive segs */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len;
if (is_decode(ses)) {
/* Digest verification case */
rte_memcpy(old_digest, sym->auth.digest.data,
start_addr = dpaa_mem_vtop(old_digest);
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
in_sg->length += ses->digest_length;
/* Digest calculation case */
sg->length -= ses->digest_length;
 * |<----data_len------->|
 * |ip_header|ah_header|icv|payload|
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct rte_mbuf *mbuf = sym->m_src;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
rte_iova_t start_addr;
ctx = dpaa_sec_alloc_ctx(ses);
old_digest = ctx->digest;
start_addr = rte_pktmbuf_iova(mbuf);
qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
sg->length = ses->digest_length;
if (is_decode(ses)) {
/* need to extend the input to a compound frame */
qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
sg->length = sym->auth.data.length + ses->digest_length;
/* hash result or digest, save digest first */
rte_memcpy(old_digest, sym->auth.digest.data,
qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
sg->length = sym->auth.data.length;
/* let the hardware check the digest */
start_addr = dpaa_mem_vtop(old_digest);
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
sg->length = sym->auth.data.length;
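/*
 * Compound FD layout used by the auth paths (sketch): sg[0] is the
 * output entry pointing at the digest, sg[1] the input entry; when the
 * input does not fit a single entry, sg[1] becomes an extension pointing
 * at the chain starting at sg[2]. On decode the saved digest is appended
 * to the input so the hardware can verify it in place.
 */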
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg, *out_sg, *in_sg;
struct rte_mbuf *mbuf;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
req_segs = mbuf->nb_segs * 2 + 3;
if (req_segs > MAX_SG_ENTRIES) {
DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
ctx = dpaa_sec_alloc_ctx(ses);
out_sg->extension = 1;
out_sg->length = sym->cipher.data.length;
qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(out_sg);
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len - sym->cipher.data.offset;
sg->offset = sym->cipher.data.offset;
/* Successive segs */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len;
in_sg->extension = 1;
in_sg->length = sym->cipher.data.length + ses->iv.length;
qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len - sym->cipher.data.offset;
sg->offset = sym->cipher.data.offset;
/* Successive segs */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len;
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
rte_iova_t src_start_addr, dst_start_addr;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ctx = dpaa_sec_alloc_ctx(ses);
src_start_addr = rte_pktmbuf_iova(sym->m_src);
dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
dst_start_addr = src_start_addr;
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length + ses->iv.length;
/* need to extend the input to a compound frame */
sg->length = sym->cipher.data.length + ses->iv.length;
qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
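/*
 * In both cipher-only variants the input side always carries the IV
 * entry (taken from the op at ses->iv.offset) ahead of the payload, and
 * the output entry covers the destination at the cipher data offset;
 * when m_dst is not supplied the operation is done in place with
 * dst_start_addr = src_start_addr.
 */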
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg, *out_sg, *in_sg;
struct rte_mbuf *mbuf;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
req_segs = mbuf->nb_segs * 2 + 4;
if (ses->auth_only_len)
if (req_segs > MAX_SG_ENTRIES) {
DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
ctx = dpaa_sec_alloc_ctx(ses);
rte_prefetch0(cf->sg);
out_sg->extension = 1;
out_sg->length = sym->aead.data.length + ses->auth_only_len
+ ses->digest_length;
out_sg->length = sym->aead.data.length + ses->auth_only_len;
/* output sg entries */
qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len - sym->aead.data.offset +
sg->offset = sym->aead.data.offset - ses->auth_only_len;
/* Successive segs */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len;
sg->length -= ses->digest_length;
if (is_encode(ses)) {
/* set auth output */
qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
sg->length = ses->digest_length;
in_sg->extension = 1;
in_sg->length = ses->iv.length + sym->aead.data.length
+ ses->auth_only_len;
in_sg->length = ses->iv.length + sym->aead.data.length
+ ses->auth_only_len + ses->digest_length;
/* input sg entries */
qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
/* 2nd seg auth only */
if (ses->auth_only_len) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len - sym->aead.data.offset;
sg->offset = sym->aead.data.offset;
/* Successive segs */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len;
if (is_decode(ses)) {
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
uint32_t length = 0;
rte_iova_t src_start_addr, dst_start_addr;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
dst_start_addr = src_start_addr;
ctx = dpaa_sec_alloc_ctx(ses);
rte_prefetch0(cf->sg);
qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
if (ses->auth_only_len) {
qm_sg_entry_set64(sg,
dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
length += sg->length;
qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
sg->length = sym->aead.data.length;
length += sg->length;
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
if (ses->auth_only_len) {
qm_sg_entry_set64(sg,
dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
length += sg->length;
qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
sg->length = sym->aead.data.length;
length += sg->length;
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
/* input compound frame */
cf->sg[1].length = length;
cf->sg[1].extension = 1;
cf->sg[1].final = 1;
cpu_to_hw_sg(&cf->sg[1]);
qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
sg->length = sym->aead.data.length + ses->auth_only_len;
length = sg->length;
if (is_encode(ses)) {
/* set auth output */
qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
sg->length = ses->digest_length;
length += sg->length;
/* output compound frame */
cf->sg[0].length = length;
cf->sg[0].extension = 1;
cpu_to_hw_sg(&cf->sg[0]);
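/*
 * GCM input ordering (sketch): IV first, then the AAD entry when
 * ses->auth_only_len is non-zero, then the payload. On encode the
 * digest is produced into the output frame; on decode the received
 * digest is copied into ctx->digest and appended to the input for
 * hardware verification.
 */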
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg, *out_sg, *in_sg;
struct rte_mbuf *mbuf;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
req_segs = mbuf->nb_segs * 2 + 4;
if (req_segs > MAX_SG_ENTRIES) {
DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
ctx = dpaa_sec_alloc_ctx(ses);
rte_prefetch0(cf->sg);
out_sg = &cf->sg[0];
out_sg->extension = 1;
out_sg->length = sym->auth.data.length + ses->digest_length;
out_sg->length = sym->auth.data.length;
/* output sg entries */
qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len - sym->auth.data.offset;
sg->offset = sym->auth.data.offset;
/* Successive segs */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len;
sg->length -= ses->digest_length;
if (is_encode(ses)) {
/* set auth output */
qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
sg->length = ses->digest_length;
in_sg->extension = 1;
in_sg->length = ses->iv.length + sym->auth.data.length;
in_sg->length = ses->iv.length + sym->auth.data.length
+ ses->digest_length;
/* input sg entries */
qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len - sym->auth.data.offset;
sg->offset = sym->auth.data.offset;
/* Successive segs */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
sg->length = mbuf->data_len;
sg->length -= ses->digest_length;
if (is_decode(ses)) {
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
rte_iova_t src_start_addr, dst_start_addr;
uint32_t length = 0;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
dst_start_addr = src_start_addr;
ctx = dpaa_sec_alloc_ctx(ses);
rte_prefetch0(cf->sg);
qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
sg->length = sym->auth.data.length;
length += sg->length;
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
sg->length = sym->auth.data.length;
length += sg->length;
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
/* input compound frame */
cf->sg[1].length = length;
cf->sg[1].extension = 1;
cf->sg[1].final = 1;
cpu_to_hw_sg(&cf->sg[1]);
qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
if (is_encode(ses)) {
/* set auth output */
qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
sg->length = ses->digest_length;
length += sg->length;
/* output compound frame */
cf->sg[0].length = length;
cf->sg[0].extension = 1;
cpu_to_hw_sg(&cf->sg[0]);
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
struct rte_crypto_sym_op *sym = op->sym;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
phys_addr_t src_start_addr, dst_start_addr;
ctx = dpaa_sec_alloc_ctx(ses);
src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
dst_start_addr = src_start_addr;
qm_sg_entry_set64(sg, src_start_addr);
sg->length = sym->m_src->pkt_len;
sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
qm_sg_entry_set64(sg, dst_start_addr);
sg->length = sym->m_src->buf_len - sym->m_src->data_off;
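/*
 * Protocol (IPsec) offload: the whole source packet is the input
 * (pkt_len bytes) and the output entry is sized to the remaining buffer
 * capacity (buf_len - data_off), since the encap/decap result can be
 * longer or shorter than the input.
 */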
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
/* Function to transmit the frames to a given device and queue pair */
struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
uint16_t num_tx = 0;
struct qm_fd fds[DPAA_SEC_BURST], *fd;
uint32_t frames_to_send;
struct rte_crypto_op *op;
struct dpaa_sec_job *cf;
dpaa_sec_session *ses;
uint32_t auth_only_len;
struct qman_fq *inq[DPAA_SEC_BURST];
frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
DPAA_SEC_BURST : nb_ops;
for (loop = 0; loop < frames_to_send; loop++) {
switch (op->sess_type) {
case RTE_CRYPTO_OP_WITH_SESSION:
ses = (dpaa_sec_session *)
get_sym_session_private_data(
cryptodev_driver_id);
case RTE_CRYPTO_OP_SECURITY_SESSION:
ses = (dpaa_sec_session *)
get_sec_session_private_data(
op->sym->sec_session);
"sessionless crypto op not supported");
frames_to_send = loop;
if (unlikely(!ses->qp || ses->qp != qp)) {
DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
if (dpaa_sec_attach_sess_q(qp, ses)) {
frames_to_send = loop;
auth_only_len = op->sym->auth.data.length -
op->sym->cipher.data.length;
if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
if (is_auth_only(ses)) {
cf = build_auth_only(op, ses);
} else if (is_cipher_only(ses)) {
cf = build_cipher_only(op, ses);
} else if (is_aead(ses)) {
cf = build_cipher_auth_gcm(op, ses);
auth_only_len = ses->auth_only_len;
} else if (is_auth_cipher(ses)) {
cf = build_cipher_auth(op, ses);
} else if (is_proto_ipsec(ses)) {
cf = build_proto(op, ses);
DPAA_SEC_DP_ERR("unsupported op");
frames_to_send = loop;
if (is_auth_only(ses)) {
cf = build_auth_only_sg(op, ses);
} else if (is_cipher_only(ses)) {
cf = build_cipher_only_sg(op, ses);
} else if (is_aead(ses)) {
cf = build_cipher_auth_gcm_sg(op, ses);
auth_only_len = ses->auth_only_len;
} else if (is_auth_cipher(ses)) {
cf = build_cipher_auth_sg(op, ses);
DPAA_SEC_DP_ERR("unsupported op");
frames_to_send = loop;
if (unlikely(!cf)) {
frames_to_send = loop;
inq[loop] = ses->inq;
fd->opaque_addr = 0;
qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
fd->_format1 = qm_fd_compound;
fd->length29 = 2 * sizeof(struct qm_sg_entry);
/* Auth_only_len is set as 0 in the descriptor and it is
 * overwritten here in the fd.cmd, which will update
fd->cmd = 0x80000000 | auth_only_len;
while (loop < frames_to_send) {
loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
frames_to_send - loop);
nb_ops -= frames_to_send;
num_tx += frames_to_send;
dpaa_qp->tx_pkts += num_tx;
dpaa_qp->tx_errs += nb_ops - num_tx;
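/*
 * Caller-side usage (a minimal sketch using the generic cryptodev API;
 * dev_id, qp_id and ops[] are the application's):
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    ops, sent);
 *
 * These resolve to dpaa_sec_enqueue_burst()/dpaa_sec_dequeue_burst()
 * registered in dpaa_sec_dev_init() below.
 */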
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
dpaa_qp->rx_pkts += num_rx;
dpaa_qp->rx_errs += nb_ops - num_rx;
DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
/** Release queue pair */
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
struct dpaa_sec_dev_private *internals;
struct dpaa_sec_qp *qp = NULL;
PMD_INIT_FUNC_TRACE();
DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
qp = &internals->qps[qp_id];
qp->internals = NULL;
dev->data->queue_pairs[qp_id] = NULL;
/** Setup a queue pair */
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
__rte_unused int socket_id,
__rte_unused struct rte_mempool *session_pool)
struct dpaa_sec_dev_private *internals;
struct dpaa_sec_qp *qp = NULL;
DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
qp = &internals->qps[qp_id];
qp->internals = internals;
dev->data->queue_pairs[qp_id] = qp;
/** Return the number of allocated queue pairs */
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
PMD_INIT_FUNC_TRACE();
return dev->data->nb_queue_pairs;
/** Returns the size of session structure */
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
PMD_INIT_FUNC_TRACE();
return sizeof(dpaa_sec_session);
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
dpaa_sec_session *session)
session->cipher_alg = xform->cipher.algo;
session->iv.length = xform->cipher.iv.length;
session->iv.offset = xform->cipher.iv.offset;
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
DPAA_SEC_ERR("No Memory for cipher key");
session->cipher_key.length = xform->cipher.key.length;
memcpy(session->cipher_key.data, xform->cipher.key.data,
xform->cipher.key.length);
session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
dpaa_sec_session *session)
session->auth_alg = xform->auth.algo;
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
DPAA_SEC_ERR("No Memory for auth key");
session->auth_key.length = xform->auth.key.length;
session->digest_length = xform->auth.digest_length;
memcpy(session->auth_key.data, xform->auth.key.data,
xform->auth.key.length);
session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
dpaa_sec_session *session)
session->aead_alg = xform->aead.algo;
session->iv.length = xform->aead.iv.length;
session->iv.offset = xform->aead.iv.offset;
session->auth_only_len = xform->aead.aad_length;
session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
DPAA_SEC_ERR("No Memory for aead key\n");
session->aead_key.length = xform->aead.key.length;
session->digest_length = xform->aead.digest_length;
memcpy(session->aead_key.data, xform->aead.key.data,
xform->aead.key.length);
session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
for (i = 0; i < qi->max_nb_sessions; i++) {
if (qi->inq_attach[i] == 0) {
qi->inq_attach[i] = 1;
DPAA_SEC_WARN("All sessions in use %x", qi->max_nb_sessions);
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
for (i = 0; i < qi->max_nb_sessions; i++) {
if (&qi->inq[i] == fq) {
qman_retire_fq(fq, NULL);
qi->inq_attach[i] = 0;
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
ret = dpaa_sec_prep_cdb(sess);
DPAA_SEC_ERR("Unable to prepare sec cdb");
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
ret = rte_dpaa_portal_init((void *)0);
DPAA_SEC_ERR("Failure in affining portal");
ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
qman_fq_fqid(&qp->outq));
DPAA_SEC_ERR("Unable to init sec queue");
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
struct dpaa_sec_dev_private *internals = dev->data->dev_private;
dpaa_sec_session *session = sess;
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
DPAA_SEC_ERR("invalid session struct");
/* Default IV length = 0 */
session->iv.length = 0;
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
session->auth_alg = RTE_CRYPTO_AUTH_NULL;
dpaa_sec_cipher_init(dev, xform, session);
/* Authentication Only */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next == NULL) {
session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
dpaa_sec_auth_init(dev, xform, session);
/* Cipher then Authenticate */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
dpaa_sec_cipher_init(dev, xform, session);
dpaa_sec_auth_init(dev, xform->next, session);
DPAA_SEC_ERR("Not supported: Auth then Cipher");
/* Authenticate then Cipher */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
dpaa_sec_auth_init(dev, xform, session);
dpaa_sec_cipher_init(dev, xform->next, session);
DPAA_SEC_ERR("Not supported: Auth then Cipher");
/* AEAD operation for AES-GCM kind of Algorithms */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
xform->next == NULL) {
dpaa_sec_aead_init(dev, xform, session);
DPAA_SEC_ERR("Invalid crypto type");
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
DPAA_SEC_ERR("unable to attach sec queue");
rte_free(session->cipher_key.data);
rte_free(session->auth_key.data);
memset(session, 0, sizeof(dpaa_sec_session));
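/*
 * Note on the chaining checks above: cipher-then-auth is accepted only
 * for encryption and auth-then-cipher only for decryption; other
 * combinations are rejected, presumably because that is what the
 * authenc shared descriptor supports.
 */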
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
void *sess_private_data;
PMD_INIT_FUNC_TRACE();
if (rte_mempool_get(mempool, &sess_private_data)) {
DPAA_SEC_ERR("Couldn't get object from session mempool");
ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
set_sym_session_private_data(sess, dev->driver_id,
/** Clear the memory of session so it doesn't leave key material behind */
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
struct dpaa_sec_dev_private *qi = dev->data->dev_private;
uint8_t index = dev->driver_id;
void *sess_priv = get_sym_session_private_data(sess, index);
PMD_INIT_FUNC_TRACE();
dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
dpaa_sec_detach_rxq(qi, s->inq);
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
memset(s, 0, sizeof(dpaa_sec_session));
set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
struct rte_security_session_conf *conf,
struct dpaa_sec_dev_private *internals = dev->data->dev_private;
struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
struct rte_crypto_auth_xform *auth_xform;
struct rte_crypto_cipher_xform *cipher_xform;
dpaa_sec_session *session = (dpaa_sec_session *)sess;
PMD_INIT_FUNC_TRACE();
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
cipher_xform = &conf->crypto_xform->cipher;
auth_xform = &conf->crypto_xform->next->auth;
auth_xform = &conf->crypto_xform->auth;
cipher_xform = &conf->crypto_xform->next->cipher;
session->proto_alg = conf->protocol;
session->cipher_key.data = rte_zmalloc(NULL,
cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
DPAA_SEC_ERR("No Memory for cipher key");
session->cipher_key.length = cipher_xform->key.length;
session->auth_key.data = rte_zmalloc(NULL,
auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
DPAA_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
session->auth_key.length = auth_xform->key.length;
memcpy(session->cipher_key.data, cipher_xform->key.data,
cipher_xform->key.length);
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
case RTE_CRYPTO_AUTH_MD5_HMAC:
session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
case RTE_CRYPTO_AUTH_AES_CMAC:
session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
case RTE_CRYPTO_AUTH_NULL:
session->auth_alg = RTE_CRYPTO_AUTH_NULL;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA256:
case RTE_CRYPTO_AUTH_SHA512:
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
case RTE_CRYPTO_AUTH_AES_GMAC:
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
switch (cipher_xform->algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
case RTE_CRYPTO_CIPHER_3DES_CBC:
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
case RTE_CRYPTO_CIPHER_AES_CTR:
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
cipher_xform->algo);
DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
cipher_xform->algo);
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
sizeof(session->ip4_hdr));
session->ip4_hdr.ip_v = IPVERSION;
session->ip4_hdr.ip_hl = 5;
session->ip4_hdr.ip_len = rte_cpu_to_be_16(
sizeof(session->ip4_hdr));
session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
session->ip4_hdr.ip_id = 0;
session->ip4_hdr.ip_off = 0;
session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
session->ip4_hdr.ip_p = (ipsec_xform->proto ==
RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
session->ip4_hdr.ip_sum = 0;
session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
(void *)&session->ip4_hdr,
session->encap_pdb.options =
(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
PDBOPTS_ESP_OIHI_PDB_INL |
PDBHMO_ESP_ENCAP_DTTL;
session->encap_pdb.spi = ipsec_xform->spi;
session->encap_pdb.ip_hdr_len = sizeof(struct ip);
session->dir = DIR_ENC;
} else if (ipsec_xform->direction ==
RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
session->decap_pdb.options = sizeof(struct ip) << 16;
session->dir = DIR_DEC;
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
DPAA_SEC_ERR("unable to attach sec queue");
rte_free(session->auth_key.data);
rte_free(session->cipher_key.data);
memset(session, 0, sizeof(dpaa_sec_session));
dpaa_sec_security_session_create(void *dev,
struct rte_security_session_conf *conf,
struct rte_security_session *sess,
struct rte_mempool *mempool)
void *sess_private_data;
struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
if (rte_mempool_get(mempool, &sess_private_data)) {
DPAA_SEC_ERR("Couldn't get object from session mempool");
switch (conf->protocol) {
case RTE_SECURITY_PROTOCOL_IPSEC:
ret = dpaa_sec_set_ipsec_session(cdev, conf,
case RTE_SECURITY_PROTOCOL_MACSEC:
DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
set_sec_session_private_data(sess, sess_private_data);
/** Clear the memory of session so it doesn't leave key material behind */
dpaa_sec_security_session_destroy(void *dev __rte_unused,
struct rte_security_session *sess)
PMD_INIT_FUNC_TRACE();
void *sess_priv = get_sec_session_private_data(sess);
dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
memset(sess, 0, sizeof(dpaa_sec_session));
set_sec_session_private_data(sess, NULL);
rte_mempool_put(sess_mp, sess_priv);
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config __rte_unused)
struct dpaa_sec_dev_private *internals;
PMD_INIT_FUNC_TRACE();
internals = dev->data->dev_private;
sprintf(str, "ctx_pool_%d", dev->data->dev_id);
if (!internals->ctx_pool) {
internals->ctx_pool = rte_mempool_create((const char *)str,
CTX_POOL_CACHE_SIZE, 0,
NULL, NULL, NULL, NULL,
if (!internals->ctx_pool) {
DPAA_SEC_ERR("%s create failed\n", str);
DPAA_SEC_INFO("mempool already created for dev_id : %d",
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
PMD_INIT_FUNC_TRACE();
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
PMD_INIT_FUNC_TRACE();
dpaa_sec_dev_close(struct rte_cryptodev *dev)
struct dpaa_sec_dev_private *internals;
PMD_INIT_FUNC_TRACE();
internals = dev->data->dev_private;
rte_mempool_free(internals->ctx_pool);
internals->ctx_pool = NULL;
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info *info)
struct dpaa_sec_dev_private *internals = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa_sec_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
info->driver_id = cryptodev_driver_id;
static struct rte_cryptodev_ops crypto_ops = {
.dev_configure = dpaa_sec_dev_configure,
.dev_start = dpaa_sec_dev_start,
.dev_stop = dpaa_sec_dev_stop,
.dev_close = dpaa_sec_dev_close,
.dev_infos_get = dpaa_sec_dev_infos_get,
.queue_pair_setup = dpaa_sec_queue_pair_setup,
.queue_pair_release = dpaa_sec_queue_pair_release,
.queue_pair_count = dpaa_sec_queue_pair_count,
.sym_session_get_size = dpaa_sec_sym_session_get_size,
.sym_session_configure = dpaa_sec_sym_session_configure,
.sym_session_clear = dpaa_sec_sym_session_clear
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
return dpaa_sec_security_cap;
struct rte_security_ops dpaa_sec_security_ops = {
.session_create = dpaa_sec_security_session_create,
.session_update = NULL,
.session_stats_get = NULL,
.session_destroy = dpaa_sec_security_session_destroy,
.set_pkt_metadata = NULL,
.capabilities_get = dpaa_sec_capabilities_get
dpaa_sec_uninit(struct rte_cryptodev *dev)
struct dpaa_sec_dev_private *internals;
internals = dev->data->dev_private;
rte_free(dev->security_ctx);
/* In case close has been called, internals->ctx_pool would be NULL */
rte_mempool_free(internals->ctx_pool);
rte_free(internals);
DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
dev->data->name, rte_socket_id());
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
struct dpaa_sec_dev_private *internals;
struct rte_security_ctx *security_instance;
struct dpaa_sec_qp *qp;
PMD_INIT_FUNC_TRACE();
cryptodev->driver_id = cryptodev_driver_id;
cryptodev->dev_ops = &crypto_ops;
cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_SECURITY |
RTE_CRYPTODEV_FF_IN_PLACE_SGL |
RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
internals = cryptodev->data->dev_private;
internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 * For secondary processes, we don't initialise any further as primary
 * has already done this work. Only check we don't need a different
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DPAA_SEC_WARN("Device already initialized by primary process");
/* Initialize security_ctx only for the primary process */
security_instance = rte_malloc("rte_security_instances_ops",
sizeof(struct rte_security_ctx), 0);
if (security_instance == NULL)
security_instance->device = (void *)cryptodev;
security_instance->ops = &dpaa_sec_security_ops;
security_instance->sess_cnt = 0;
cryptodev->security_ctx = security_instance;
for (i = 0; i < internals->max_nb_queue_pairs; i++) {
/* init qman fq for queue pair */
qp = &internals->qps[i];
ret = dpaa_sec_init_tx(&qp->outq);
DPAA_SEC_ERR("config tx of queue pair %d", i);
flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
QMAN_FQ_FLAG_TO_DCPORTAL;
for (i = 0; i < internals->max_nb_sessions; i++) {
/* create rx qman fq for sessions */
ret = qman_create_fq(0, flags, &internals->inq[i]);
if (unlikely(ret != 0)) {
DPAA_SEC_ERR("sec qman_create_fq failed");
RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
dpaa_sec_uninit(cryptodev);
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
struct rte_dpaa_device *dpaa_dev)
struct rte_cryptodev *cryptodev;
char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);
cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
if (cryptodev == NULL)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
cryptodev->data->dev_private = rte_zmalloc_socket(
"cryptodev private structure",
sizeof(struct dpaa_sec_dev_private),
RTE_CACHE_LINE_SIZE,
if (cryptodev->data->dev_private == NULL)
rte_panic("Cannot allocate memzone for private "
dpaa_dev->crypto_dev = cryptodev;
cryptodev->device = &dpaa_dev->device;
cryptodev->device->driver = &dpaa_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
/* if sec device version is not configured */
if (!rta_get_sec_era()) {
const struct device_node *caam_node;
for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
const uint32_t *prop = of_get_property(caam_node,
INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
/* Invoke PMD device initialization function */
retval = dpaa_sec_dev_init(cryptodev);
/* In case of error, cleanup is done */
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(cryptodev->data->dev_private);
rte_cryptodev_pmd_release_device(cryptodev);
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
struct rte_cryptodev *cryptodev;
cryptodev = dpaa_dev->crypto_dev;
if (cryptodev == NULL)
ret = dpaa_sec_uninit(cryptodev);
return rte_cryptodev_pmd_destroy(cryptodev);
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
.drv_type = FSL_DPAA_CRYPTO,
.name = "DPAA SEC PMD"
.probe = cryptodev_dpaa_sec_probe,
.remove = cryptodev_dpaa_sec_remove,
static struct cryptodev_driver dpaa_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
cryptodev_driver_id);
RTE_INIT(dpaa_sec_init_log)
dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
if (dpaa_logtype_sec >= 0)
rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);