/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2018 NXP
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
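/*
 * Per-lcore staging area used by the Rx DQRR callback
 * (dqrr_out_fq_cb_rx below) to collect up to DPAA_SEC_BURST
 * completed crypto ops before they are returned to the caller.
 */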
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
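/*
 * Allocate a per-packet op context from the session's mempool and
 * zero its scatter/gather table with cache-line zeroing instructions.
 */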
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called four
	 * times to clear all the SG entries. dpaa_sec_alloc_ctx() is
	 * called per packet, and memset() is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
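/*
 * Virtual/IOVA address translation helpers. dpaa_mem_vtop() walks the
 * memseg backing the buffer; dpaa_mem_ptov() is the inverse lookup.
 */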
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	return rte_mem_iova2virt(paddr);
}
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with dest chan as the CAAM chan so that
 * all packets on this queue get dispatched to the CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
/* Frames are enqueued on the in_fq; the CAAM puts the crypto result
 * on the out_fq.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output,
	 * sg[1] is for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* The CAAM result is put onto this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret))
		DPAA_SEC_ERR("unable to init caam source fq!");

	return ret;
}
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
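/*
 * Map the session's auth algorithm onto CAAM algorithm selectors.
 * When the session carries an IPsec protocol offload, the protocol
 * identifiers (OP_PCL_IPSEC_*) are used instead of the plain
 * OP_ALG_ALGSEL_* ones.
 */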
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
	}
}
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
	}
}
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
	}
}
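/*
 * dpaa_sec_prep_cdb() builds the SEC shared descriptor once per session
 * and stores it in the session's command descriptor block (CDB). A rough
 * outline of the selection logic below, for orientation:
 *
 *	if (is_cipher_only(ses))      cnstr_shdsc_blkcipher(...);
 *	else if (is_auth_only(ses))   cnstr_shdsc_hmac(...);
 *	else if (is_aead(ses))        cnstr_shdsc_gcm_encap/decap(...);
 *	else if (is_proto_ipsec(ses)) cnstr_shdsc_ipsec_new_encap/decap(...);
 *	else                          cnstr_shdsc_authenc(...);
 */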
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, SHR_SERIAL,
						&ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, SHR_SERIAL,
						&ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* Auth_only_len is set as 0 here and it will be
			 * overwritten in the fd for each packet.
			 */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
		}
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
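/*
 * Dequeue completions from the qp's out-FQ with a volatile dequeue
 * command. For each frame descriptor the embedded dpaa_sec_job is
 * recovered via container_of() to reach the op context, fd->status is
 * checked, and the finished op is handed back to the caller.
 */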
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * Until request for four buffers, we provide exact number of buffers.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
	 * requested, so we request two less in this case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output,
		 * sg[1] is for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
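/*
 * The build_* helpers below translate one crypto op into a dpaa_sec_job:
 * a compound frame whose sg[0] describes the output buffer(s) and whose
 * sg[1] describes the input, with any extra scatter/gather entries
 * stored from sg[2] onward inside the same op context.
 */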
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
/*
 * The packet looks like:
 *		|<----data_len------->|
 *		|ip_header|ah_header|icv|payload|
 *			^
 *			|
 *		mbuf->data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
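/*
 * GCM builders: for AEAD the input frame carries, in order, the IV, any
 * AAD bytes (ses->auth_only_len), and the payload; on encrypt the digest
 * is appended to the output, on decrypt it is supplied with the input
 * and verified by hardware.
 */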
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
					ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
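/*
 * A minimal usage sketch (not part of this driver): an application
 * drives the burst hooks below through the generic cryptodev API, e.g.
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						 ops, nb_ops);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 *
 * dev_id, qp_id and ops are application-chosen values, shown only for
 * illustration.
 */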
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp || ses->qp != qp)) {
				DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
						ses->qp, qp);
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
				if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq;
			fd->opaque_addr = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in the descriptor and it
			 * is overwritten here in the fd.cmd, which will update
			 * the DPOVRD reg.
			 */
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
						      frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
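/*
 * Each session gets a dedicated Rx ("in") frame queue out of the
 * device-private pool; attach marks a free slot, detach retires the
 * FQ and returns the slot.
 */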
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All session queues in use %x", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return -1;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		DPAA_SEC_ERR("unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
				     sess_private_data);

	return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
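/*
 * IPsec lookaside-protocol session setup: copies the keys, maps the
 * auth/cipher algorithms, and for egress builds the tunnel IPv4 header
 * plus the SEC encap PDB (decap PDB for ingress).
 */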
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;
	session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
			cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					     auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
			auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     auth_xform->algo);
		goto out;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     auth_xform->algo);
		goto out;
	}

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     cipher_xform->algo);
		goto out;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
				: ipsec_xform->proto;
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
	} else
		goto out;
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		DPAA_SEC_ERR("unable to attach sec queue");
		goto out;
	}

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
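/*
 * A minimal sketch of the caller's side (illustrative only): an
 * application obtains the device's security context and creates a
 * lookaside IPsec session through the rte_security API, e.g.
 *
 *	struct rte_security_ctx *sec_ctx =
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *s =
 *		rte_security_session_create(sec_ctx, &conf, sess_mempool);
 *
 * dev_id, conf and sess_mempool are application-provided values.
 */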
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
						 sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
				  struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
		       struct rte_cryptodev_config *config __rte_unused)
{
	char str[20];
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
	if (!internals->ctx_pool) {
		internals->ctx_pool = rte_mempool_create((const char *)str,
					CTX_POOL_NUM_BUFS,
					CTX_POOL_BUF_SIZE,
					CTX_POOL_CACHE_SIZE, 0,
					NULL, NULL, NULL, NULL,
					SOCKET_ID_ANY, 0);
		if (!internals->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d",
			      dev->data->dev_id);

	return 0;
}
static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	internals->ctx_pool = NULL;

	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa_sec_dev_configure,
	.dev_start	      = dpaa_sec_dev_start,
	.dev_stop	      = dpaa_sec_dev_stop,
	.dev_close	      = dpaa_sec_dev_close,
	.dev_infos_get        = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.queue_pair_count     = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear     = dpaa_sec_sym_session_clear
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	/* In case close has been called, internals->ctx_pool would be NULL */
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				       sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}