/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017 NXP.
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>
/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
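/*
 * Per-lcore staging area used by the DQRR callback below:
 * dqrr_out_fq_cb_rx() stores completed crypto ops into dpaa_sec_ops[]
 * (at most DPAA_SEC_BURST per poll, after which it returns
 * qman_cb_dqrr_defer), so that a portal poll can hand results back to
 * the caller without any locking.
 */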
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (size_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].iova +
				(vaddr_64 - memseg[i].addr_64);

			return (rte_iova_t)paddr;
		}
	}
	return (rte_iova_t)(size_t)NULL;
}
/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (size_t)vaddr - ctx->vtop_offset;
}
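/*
 * Illustration (values are made up): if a ctx is allocated at virtual
 * address 0x7f0040001000 and rte_mempool_virt2iova() reports IOVA
 * 0x40001000, then vtop_offset = 0x7f0000000000.  Any address inside
 * that ctx (e.g. &cf->sg[2] or ctx->digest) can then be converted to
 * its IOVA with a single subtraction instead of the memseg walk done
 * by dpaa_mem_vtop().
 */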
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	return rte_mem_iova2virt(paddr);
}
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue could be dispatched into caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);

	return ret;
}
/* something is put into in_fq and caam puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
		return ret;
	}

	return ret;
}
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
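/*
 * Example of how a session classifies (illustrative): a plain
 * AES-CBC + SHA1-HMAC session hits is_auth_cipher(), whereas the same
 * two algorithms negotiated through rte_security with
 * RTE_SECURITY_PROTOCOL_IPSEC make is_auth_cipher() false and
 * is_proto_ipsec() true, steering dpaa_sec_prep_cdb() to the IPsec
 * protocol descriptors instead of the plain authenc one.
 */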
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported aead alg\n");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, &ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, &ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* Auth_only_len is set as 0 here and it will be
			 * overwritten in fd for each packet.
			 */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
		}
	}
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
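/*
 * A note on the inline-key logic above (combined cipher+auth branch):
 * rta_inline_query() reports in cdb->sh_desc[2] whether each key still
 * fits in the shared descriptor once the descriptor code itself is
 * accounted for.  Bit 0 covers the first key length written into
 * sh_desc[0] (cipher), bit 1 the second one (auth).  A set bit keeps
 * the key immediate (RTA_DATA_IMM); a clear bit makes the descriptor
 * reference the key by physical address (RTA_DATA_PTR via
 * dpaa_mem_vtop()).
 */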
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int ret;
	struct qm_dqrr_entry *dq;

	fq = &qp->outq;
	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			printf("\nSEC return err: 0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
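/*
 * The dequeue above relies on QMan volatile dequeues: qman_set_vdq()
 * arms a volatile dequeue command (VDQCR) for up to
 * DPAA_MAX_DEQUEUE_NUM_FRAMES frames on this qp's out-FQ, and the
 * do/while loop keeps pulling DQRR entries until the portal clears
 * QMAN_FQ_STATE_VDQCR, i.e. until the volatile command has fully
 * completed.
 */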
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
			   MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
/**
 * packet looks like:
 *		|<----data_len------->|
 *		|ip_header|ah_header|icv|payload|
 *		^
 *		|
 *		mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
			   MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
			   MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
			ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
			   MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
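/*
 * Note on build_proto(): for IPsec protocol offload the whole packet is
 * handed to SEC as a simple one-entry compound frame.  The output entry
 * is sized to the full remaining buffer (buf_len - data_off) rather than
 * to pkt_len, presumably because encapsulation grows the packet; the
 * final length is read back from sg[0] on completion (see the
 * SECURITY_SESSION handling in dpaa_sec_deq() and dqrr_out_fq_cb_rx()).
 */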
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct dpaa_sec_op_ctx *ctx;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				PMD_TX_LOG(ERR,
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp || ses->qp != qp)) {
				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
					     ses->qp, qp);
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
				if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else {
					PMD_TX_LOG(ERR, "not supported sec op");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					PMD_TX_LOG(ERR, "not supported sec op");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq;
			fd->opaque_addr = 0;

			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD register.
			 */
			fd->cmd = 0x80000000 | auth_only_len;
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}
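/*
 * Illustrative usage from an application (not part of this driver):
 * the two burst hooks above are what the generic cryptodev API calls
 * into, e.g.
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	...
 *	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    deq_ops, burst_sz);
 *
 * Each op must carry a session created against this device;
 * sessionless ops are rejected in dpaa_sec_enqueue_burst().
 */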
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
		       DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
		       DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
		       DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	PMD_DRV_LOG(ERR, "All sessions in use %x", qi->max_nb_sessions);

	return NULL;
}
static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
		return -1;
	}

	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to init sec queue");

	return ret;
}
static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}
static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
			void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (sess->inq)
		dpaa_sec_detach_rxq(qi, sess->inq);
	sess->inq = NULL;

	sess->qp = NULL;
	return 0;
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
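/*
 * Illustrative application-side flow feeding into the code above (this
 * is the rte_cryptodev session API of this DPDK generation, shown as a
 * sketch, not driver code):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mempool);
 *	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	cipher_xform.next = &auth_xform;	(cipher-then-auth chain)
 *	rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
 *				       sess_mempool);
 *
 * rte_cryptodev_sym_session_init() lands in
 * dpaa_sec_session_configure(), which calls
 * dpaa_sec_set_session_parameters() above.
 */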
static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
				"session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;
	session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
			cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					     auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
			auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			auth_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto out;
	}

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
				: IPPROTO_AH;
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		/* outer IP header length lives in the upper bits of options */
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
	} else
		goto out;
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto out;
	}

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		PMD_DRV_LOG(ERR,
			"DPAA PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
				  struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(sess, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
			RTE_DPAA_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_start = dpaa_sec_queue_pair_start,
	.queue_pair_stop = dpaa_sec_queue_pair_stop,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.session_get_size = dpaa_sec_session_get_size,
	.session_configure = dpaa_sec_session_configure,
	.session_clear = dpaa_sec_session_clear,
	.qp_attach_session = dpaa_sec_qp_attach_sess,
	.qp_detach_session = dpaa_sec_qp_detach_sess,
};
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				       sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
			goto init_error;
		}
	}

	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};
static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);