/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
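
/* Complete a crypto op: a zero fd_status written back by SEC means
 * success, anything else is reported as an error on the op.
 */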
static void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
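
/* Allocate a per-op context (job descriptor plus SG table) from the
 * session's context pool and zero its SG entries.
 */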
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}

	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (uint64_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
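
/* Convert a virtual address to an IO (bus) address by walking the EAL
 * memseg table.
 */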
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (uint64_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].iova +
				(vaddr_64 - memseg[i].addr_64);

			return (rte_iova_t)paddr;
		}
	}
	return (rte_iova_t)(NULL);
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (uint64_t)vaddr - ctx->vtop_offset;
}
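
/* Inverse of dpaa_mem_vtop(): map an IO address back to a virtual
 * address via the memseg table.
 */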
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].iova &&
		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
			return (void *)(memseg[i].addr_64 +
					(paddr - memseg[i].iova));
	}
	return NULL;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as caam chan so that
 * all the packets in this queue could be dispatched into caam.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);

	return ret;
}

/* Something is put into in_fq and caam puts the crypto result into out_fq. */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output,
	 * sg[1] is for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret))
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");

	return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
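
/* Translate the session's auth algorithm into the CAAM algorithm
 * selector and HMAC mode expected by the RTA descriptor library.
 */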
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}
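
/* The CDB (command descriptor block) built below holds the shared
 * descriptor that SEC runs for every frame on the session; it is
 * referenced from the input FQ context set up in dpaa_sec_init_rx().
 */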
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported aead alg\n");
			return -ENOTSUP;
		}
		alginfo.key = (uint64_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		/* Ask RTA whether the keys can be inlined in the descriptor */
		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
						(void *)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
						(void *)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* Auth_only_len is set as 0 here and it will be overwritten
		 * in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, &alginfo_c, &alginfo_a,
				ses->iv.length, 0,
				ses->digest_length, ses->dir);
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
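
/* Volatile dequeue: pull up to len frames from a parked FQ and poll the
 * portal until the VDQCR completes.
 */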
static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
	unsigned int pkts = 0;
	int ret;
	struct qm_mcr_queryfq_np np;
	enum qman_fq_state state;
	uint32_t flags;
	uint32_t vdqcr;

	qman_query_fq_np(fq, &np);
	if (np.frm_cnt) {
		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
		if (exact)
			vdqcr |= QM_VDQCR_EXACT;
		ret = qman_volatile_dequeue(fq, 0, vdqcr);
		if (ret)
			return 0;
		do {
			pkts += qman_poll_dqrr(len);
			qman_fq_state(fq, &state, &flags);
		} while (flags & QMAN_FQ_STATE_VDQCR);
	}
	return pkts;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;

	fq = &qp->outq;
	dpaa_sec_op_nb = 0;
	dpaa_sec_ops = ops;

	if (unlikely(nb_ops > DPAA_SEC_BURST))
		nb_ops = DPAA_SEC_BURST;

	return dpaa_volatile_deq(fq, nb_ops, 1);
}

/*
 *	|<----data_len------->|
 *	|ip_header|ah_header|icv|payload|
 */
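/* Build a compound frame descriptor for an auth-only (HMAC) op. For
 * decode, the received digest is copied aside so the hardware can verify
 * it against the one it computes.
 */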
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}
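
/* Build a compound frame descriptor for a cipher-only op: the input
 * chain is IV plus payload, the output is the (in-place or
 * out-of-place) cipher result.
 */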
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
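
/* Build a compound frame descriptor for an AEAD (AES-GCM) op: the input
 * chain is IV, optional AAD and payload (plus, on decode, the received
 * digest); the output chain is the payload plus, on encode, the digest.
 */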
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
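
/* Build a compound frame descriptor for chained cipher+auth ops. The
 * layout mirrors the GCM case, with the digest handled through the auth
 * fields instead of the AEAD ones.
 */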
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
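
/* Build the job for one crypto op and enqueue the resulting compound
 * frame descriptor on the session's SEC input queue.
 */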
static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct qm_fd fd;
	int ret;
	uint32_t auth_only_len = op->sym->auth.data.length -
				 op->sym->cipher.data.length;

	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
					cryptodev_driver_id);

	if (unlikely(!ses->qp || ses->qp != qp)) {
		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
		if (dpaa_sec_attach_sess_q(qp, ses))
			return -1;
	}

	/*
	 * Segmented buffer is not supported.
	 */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}

	if (is_auth_only(ses)) {
		cf = build_auth_only(op, ses);
	} else if (is_cipher_only(ses)) {
		cf = build_cipher_only(op, ses);
	} else if (is_aead(ses)) {
		cf = build_cipher_auth_gcm(op, ses);
		auth_only_len = ses->auth_only_len;
	} else if (is_auth_cipher(ses)) {
		cf = build_cipher_auth(op, ses);
	} else {
		PMD_TX_LOG(ERR, "not supported sec op");
		return -ENOTSUP;
	}
	if (unlikely(!cf))
		return -ENOMEM;

	memset(&fd, 0, sizeof(struct qm_fd));
	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
	fd._format1 = qm_fd_compound;
	fd.length29 = 2 * sizeof(struct qm_sg_entry);
	/* Auth_only_len is set as 0 in descriptor and it is overwritten
	 * here in the fd.cmd which will update the DPOVRD reg.
	 */
	if (auth_only_len)
		fd.cmd = 0x80000000 | auth_only_len;

	ret = qman_enqueue(ses->inq, &fd, 0);

	return ret;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	int32_t ret;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Prepare each packet which is to be sent */
	for (loop = 0; loop < nb_ops; loop++) {
		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
			return 0;
		}
		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
		if (!ret)
			num_tx++;
	}
	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of the session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
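
/* Copy the cipher transform parameters (algorithm, key, IV layout,
 * direction) into the private session.
 */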
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
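
/* Claim a free SEC input FQ from the device's pool for a new session. */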
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	PMD_DRV_LOG(ERR, "All %x session rx queues in use",
		    qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
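
/* Bind a session to a queue pair: build its shared descriptor and
 * schedule its input FQ toward the CAAM, with results delivered to the
 * queue pair's output FQ.
 */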
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
		return -1;
	}

	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to init sec queue");

	return ret;
}

static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
			void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (sess->inq)
		dpaa_sec_detach_rxq(qi, sess->inq);
	sess->inq = NULL;
	sess->qp = NULL;

	return 0;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
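
/* Configure a crypto session: allocate private session data from the
 * mempool and populate it from the xform chain.
 */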
static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
			    "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		       struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
			RTE_DPAA_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_start = dpaa_sec_queue_pair_start,
	.queue_pair_stop = dpaa_sec_queue_pair_stop,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.session_get_size = dpaa_sec_session_get_size,
	.session_configure = dpaa_sec_session_configure,
	.session_clear = dpaa_sec_session_clear,
	.qp_attach_session = dpaa_sec_qp_attach_sess,
	.qp_detach_session = dpaa_sec_qp_detach_sess,
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
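
/* Initialize the device: set the ops and burst functions, create the
 * per-queue-pair output FQs, the per-session input FQs, and the op
 * context pool.
 */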
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
			goto init_error;
		}
	}

	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era", NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);