/*
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of NXP nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
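/*
 * Per-lcore cursor for in-flight dequeues: dpaa_sec_deq() points
 * dpaa_sec_ops at the caller's ops[] array and resets dpaa_sec_op_nb;
 * the DQRR callback dqrr_out_fq_cb_rx() then fills the array as frames
 * come back from SEC. Thread-local storage matches the lockless queue
 * pair contract: each queue pair must be polled by one thread only.
 */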
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}

	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;

	return ctx;
}
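/*
 * Virtual/physical address translation helpers. Both do a linear walk
 * of the EAL memseg table, so their cost grows with the number of
 * memsegs; they are invoked on the data path for descriptor and SG
 * entry addresses.
 */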
static inline phys_addr_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (uint64_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].phys_addr +
				(vaddr_64 - memseg[i].addr_64);

			return (phys_addr_t)paddr;
		}
	}
	return (phys_addr_t)(NULL);
}
static inline void *
dpaa_mem_ptov(phys_addr_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].phys_addr &&
		    (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
			return (void *)(memseg[i].addr_64 +
					(paddr - memseg[i].phys_addr));
	}
	return NULL;
}
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with the CAAM channel as destination so that
 * all packets enqueued on this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, phys_addr_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;

	ret = qman_create_fq(0, flags, fq_in);
	if (unlikely(ret != 0)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed");

	return ret;
}
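/*
 * In the setup above, CONTEXT_A carries the physical address of the
 * session's shared descriptor (the CDB) and CONTEXT_B names the frame
 * queue on which SEC returns results, so once a frame is enqueued on
 * this queue the hardware runs the descriptor and replies without
 * further software involvement.
 */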
/* Requests are enqueued on the in-FQ and CAAM puts the crypto result
 * on the out-FQ, which this callback drains.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output,
	 * sg[1] is for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* The CAAM result is delivered on this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
		return ret;
	}

	return ret;
}
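/*
 * Note that the out-FQ is created with QMAN_FQ_FLAG_NO_ENQUEUE:
 * software never enqueues to it, only SEC does. Results are pulled
 * with volatile dequeues (see dpaa_volatile_deq() below) and handed
 * to dqrr_out_fq_cb_rx().
 */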
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
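/*
 * These predicates drive descriptor selection: dpaa_sec_prep_cdb()
 * maps cipher-only sessions to a block-cipher shared descriptor,
 * auth-only to an HMAC descriptor, AEAD to GCM encap/decap, and the
 * combined cipher+auth case to an authenc descriptor.
 */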
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}
/* Prepare the command block (CDB) of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->qp->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported aead alg\n");
			return -ENOTSUP;
		}
		alginfo.key = (uint64_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
						(void *)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
						(void *)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;

		/* Auth_only_len is set as 0 here and it will be overwritten
		 * in the fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, &alginfo_c, &alginfo_a,
				ses->iv.length, 0,
				ses->digest_length, ses->dir);
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
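/*
 * The rta_inline_query() step above decides, from the combined key
 * lengths, whether each key still fits immediately inside the shared
 * descriptor or must instead be referenced by pointer (RTA_DATA_PTR),
 * since the descriptor buffer has a fixed maximum size. The final
 * header words are stored big-endian, the byte order in which SEC
 * fetches the descriptor.
 */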
static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
	unsigned int pkts = 0;
	int ret;
	struct qm_mcr_queryfq_np np;
	enum qman_fq_state state;
	uint32_t flags;
	uint32_t vdqcr;

	qman_query_fq_np(fq, &np);
	if (np.frm_cnt) {
		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
		if (exact)
			vdqcr |= QM_VDQCR_EXACT;
		ret = qman_volatile_dequeue(fq, 0, vdqcr);
		if (ret)
			return 0;
		do {
			pkts += qman_poll_dqrr(len);
			qman_fq_state(fq, &state, &flags);
		} while (flags & QMAN_FQ_STATE_VDQCR);
	}
	return pkts;
}
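/*
 * A volatile dequeue (VDQCR) asks QMan for up to 'len' frames from a
 * frame queue that is not scheduled to a portal; the loop above polls
 * the DQRR ring until QMan clears the VDQCR-active state, i.e. until
 * all requested frames have been delivered to the callback.
 */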
/* The qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;

	fq = &qp->outq;
	dpaa_sec_op_nb = 0;
	dpaa_sec_ops = ops;

	if (unlikely(nb_ops > DPAA_SEC_BURST))
		nb_ops = DPAA_SEC_BURST;

	return dpaa_volatile_deq(fq, nb_ops, 1);
}
/*
 * The packet looks like:
 *           |<----data_len------->|
 * |ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_mtophys(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}
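/*
 * For verification (decode), the digest supplied by the application is
 * copied aside into ctx->digest and appended to the input compound
 * frame so SEC can compare it against the ICV it computes; for
 * generation (encode), the input is just the region to hash and the
 * digest is written through the output SG entry.
 */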
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	phys_addr_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_physaddr + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_physaddr +
				 sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
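/*
 * Note that the first output SG entry above starts auth_only_len bytes
 * before aead.data.offset and spans data.length + auth_only_len bytes,
 * i.e. the AAD region is included in the output window ahead of the
 * en/decrypted payload.
 */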
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_physaddr + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_physaddr +
				 sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct qm_fd fd;
	int ret;
	uint32_t auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
					cryptodev_driver_id);

	if (unlikely(!qp->ses || qp->ses != ses)) {
		qp->ses = ses;
		ses->qp = qp;
		ret = dpaa_sec_prep_cdb(ses);
		if (ret)
			return ret;
	}

	/* Segmented buffers are not supported. */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}
	if (is_auth_only(ses)) {
		cf = build_auth_only(op, ses);
	} else if (is_cipher_only(ses)) {
		cf = build_cipher_only(op, ses);
	} else if (is_aead(ses)) {
		cf = build_cipher_auth_gcm(op, ses);
		auth_only_len = ses->auth_only_len;
	} else if (is_auth_cipher(ses)) {
		cf = build_cipher_auth(op, ses);
	} else {
		PMD_TX_LOG(ERR, "not supported sec op");
		return -ENOTSUP;
	}
	if (unlikely(!cf))
		return -ENOMEM;

	memset(&fd, 0, sizeof(struct qm_fd));
	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
	fd._format1 = qm_fd_compound;
	fd.length29 = 2 * sizeof(struct qm_sg_entry);
	/* Auth_only_len is set as 0 in the descriptor and is overwritten
	 * here in fd.cmd, which updates the DPOVRD register.
	 */
	if (auth_only_len)
		fd.cmd = 0x80000000 | auth_only_len;

	do {
		ret = qman_enqueue(&qp->inq, &fd, 0);
	} while (ret != 0);

	return 0;
}
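/*
 * Burst entry points registered with the cryptodev layer. A caller
 * drives them through the generic API, e.g. (sketch):
 *
 *	n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 */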
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	int32_t ret;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Prepare each packet which is to be sent */
	for (loop = 0; loop < nb_ops; loop++) {
		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
			return 0;
		}
		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
		if (!ret)
			num_tx++;
	}
	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_qp *qp;

	PMD_INIT_FUNC_TRACE();

	qp = dev->data->queue_pairs[qp_id];
	if (qp->ses != NULL) {
		PMD_INIT_LOG(ERR, "qp in-use by another session\n");
		return -EBUSY;
	}

	qp->ses = sess;
	sess->qp = qp;

	return dpaa_sec_prep_cdb(sess);
}
static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_qp *qp;

	PMD_INIT_FUNC_TRACE();

	qp = dev->data->queue_pairs[qp_id];
	if (qp->ses != NULL) {
		qp->ses = NULL;
		sess->qp = NULL;
		return 0;
	}

	PMD_DRV_LOG(ERR, "No session attached to qp");
	return -EINVAL;
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;

	return 0;
}
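/*
 * Note that setting session parameters only records keys and algorithm
 * choices; the shared descriptor itself is built by dpaa_sec_prep_cdb()
 * once the session is bound to a queue pair, either explicitly via
 * dpaa_sec_qp_attach_sess() or lazily on the first enqueue.
 */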
static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
			    "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
				 sess_private_data);

	return 0;
}
/** Clear the session memory so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		       struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_start = dpaa_sec_queue_pair_start,
	.queue_pair_stop = dpaa_sec_queue_pair_stop,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.session_get_size = dpaa_sec_session_get_size,
	.session_configure = dpaa_sec_session_configure,
	.session_clear = dpaa_sec_session_clear,
	.qp_attach_session = dpaa_sec_qp_attach_sess,
	.qp_detach_session = dpaa_sec_qp_detach_sess,
};
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	if (dev == NULL)
		return -ENODEV;

	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp;
	uint32_t i;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
		ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
				       qman_fq_fqid(&qp->outq));
		if (ret) {
			PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
			goto init_error;
		}
	}

	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);