1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2018 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
37 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec_log.h>
41 enum rta_sec_era rta_sec_era;
45 static uint8_t cryptodev_driver_id;
47 static __thread struct rte_crypto_op **dpaa_sec_ops;
48 static __thread int dpaa_sec_op_nb;
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 if (!ctx->fd_status) {
57 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
63	/* report op status to sym->op and then free the ctx memory */
64 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
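/* Allocate a per-op context (compound frame SG table plus a digest
 * scratch area) from the session's context mempool.
 */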
67 static inline struct dpaa_sec_op_ctx *
68 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
70 struct dpaa_sec_op_ctx *ctx;
73 retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
75 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
79	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
80	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
81	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
82	 * each packet, so memset() would be costlier than dcbz_64().
84 dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
85 dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
86 dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
87 dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
89 ctx->ctx_pool = ses->ctx_pool;
90 ctx->vtop_offset = (size_t) ctx
91 - rte_mempool_virt2iova(ctx);
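/* Translate a virtual address to its IOVA using the memseg backing it. */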
96 static inline rte_iova_t
97 dpaa_mem_vtop(void *vaddr)
99 const struct rte_memseg *ms;
101 ms = rte_mem_virt2memseg(vaddr, NULL);
103 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
108 dpaa_mem_ptov(rte_iova_t paddr)
110 return rte_mem_iova2virt(paddr);
114 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
116 const struct qm_mr_entry *msg)
118 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
119 fq->fqid, msg->ern.rc, msg->ern.seqnum);
122	/* Initialize the queue with dest chan as caam chan so that
123	 * all packets in this queue are dispatched to CAAM
126 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
129 struct qm_mcc_initfq fq_opts;
133 /* Clear FQ options */
134 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
136 flags = QMAN_INITFQ_FLAG_SCHED;
137 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
138 QM_INITFQ_WE_CONTEXTB;
140 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
141 fq_opts.fqd.context_b = fqid_out;
142 fq_opts.fqd.dest.channel = qm_channel_caam;
143 fq_opts.fqd.dest.wq = 0;
145 fq_in->cb.ern = ern_sec_fq_handler;
147 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
149 ret = qman_init_fq(fq_in, flags, &fq_opts);
150 if (unlikely(ret != 0))
151 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
156	/* frames are enqueued on in_fq and CAAM puts the crypto result into out_fq */
157 static enum qman_cb_dqrr_result
158 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
159 struct qman_fq *fq __always_unused,
160 const struct qm_dqrr_entry *dqrr)
162 const struct qm_fd *fd;
163 struct dpaa_sec_job *job;
164 struct dpaa_sec_op_ctx *ctx;
166 if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
167 return qman_cb_dqrr_defer;
169 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
170 return qman_cb_dqrr_consume;
173 /* sg is embedded in an op ctx,
174 * sg[0] is for output
177 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
179 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
180 ctx->fd_status = fd->status;
181 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
182 struct qm_sg_entry *sg_out;
185 sg_out = &job->sg[0];
186 hw_sg_to_cpu(sg_out);
187 len = sg_out->length;
188 ctx->op->sym->m_src->pkt_len = len;
189 ctx->op->sym->m_src->data_len = len;
191 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
192 dpaa_sec_op_ending(ctx);
194 return qman_cb_dqrr_consume;
197	/* the CAAM result is put into this queue */
199 dpaa_sec_init_tx(struct qman_fq *fq)
202 struct qm_mcc_initfq opts;
205 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
206 QMAN_FQ_FLAG_DYNAMIC_FQID;
208 ret = qman_create_fq(0, flags, fq);
210 DPAA_SEC_ERR("qman_create_fq failed");
214 memset(&opts, 0, sizeof(opts));
215 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
216 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
218 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
220 fq->cb.dqrr = dqrr_out_fq_cb_rx;
221 fq->cb.ern = ern_sec_fq_handler;
223 ret = qman_init_fq(fq, 0, &opts);
225 DPAA_SEC_ERR("unable to init caam source fq!");
232 static inline int is_cipher_only(dpaa_sec_session *ses)
234 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
235 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
238 static inline int is_auth_only(dpaa_sec_session *ses)
240 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
241 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
244 static inline int is_aead(dpaa_sec_session *ses)
246 return ((ses->cipher_alg == 0) &&
247 (ses->auth_alg == 0) &&
248 (ses->aead_alg != 0));
251 static inline int is_auth_cipher(dpaa_sec_session *ses)
253 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
254 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
255 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
258 static inline int is_proto_ipsec(dpaa_sec_session *ses)
260 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
263 static inline int is_encode(dpaa_sec_session *ses)
265 return ses->dir == DIR_ENC;
268 static inline int is_decode(dpaa_sec_session *ses)
270 return ses->dir == DIR_DEC;
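/* Map the session auth algorithm to the CAAM algorithm selector and
 * HMAC mode; IPsec-specific selectors are used for protocol offload
 * sessions.
 */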
274 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
276 switch (ses->auth_alg) {
277 case RTE_CRYPTO_AUTH_NULL:
278 ses->digest_length = 0;
280 case RTE_CRYPTO_AUTH_MD5_HMAC:
282 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
283 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
284 alginfo_a->algmode = OP_ALG_AAI_HMAC;
286 case RTE_CRYPTO_AUTH_SHA1_HMAC:
288 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
289 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
290 alginfo_a->algmode = OP_ALG_AAI_HMAC;
292 case RTE_CRYPTO_AUTH_SHA224_HMAC:
294 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
295 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
296 alginfo_a->algmode = OP_ALG_AAI_HMAC;
298 case RTE_CRYPTO_AUTH_SHA256_HMAC:
300 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
301 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
302 alginfo_a->algmode = OP_ALG_AAI_HMAC;
304 case RTE_CRYPTO_AUTH_SHA384_HMAC:
306 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
307 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
308 alginfo_a->algmode = OP_ALG_AAI_HMAC;
310 case RTE_CRYPTO_AUTH_SHA512_HMAC:
312 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
313 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
314 alginfo_a->algmode = OP_ALG_AAI_HMAC;
317 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
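/* Map the session cipher algorithm to the CAAM algorithm selector and
 * block-cipher mode, again using the IPsec selectors for protocol
 * offload sessions.
 */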
322 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
324 switch (ses->cipher_alg) {
325 case RTE_CRYPTO_CIPHER_NULL:
327 case RTE_CRYPTO_CIPHER_AES_CBC:
329 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
330 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
331 alginfo_c->algmode = OP_ALG_AAI_CBC;
333 case RTE_CRYPTO_CIPHER_3DES_CBC:
335 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
336 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
337 alginfo_c->algmode = OP_ALG_AAI_CBC;
339 case RTE_CRYPTO_CIPHER_AES_CTR:
341 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
342 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
343 alginfo_c->algmode = OP_ALG_AAI_CTR;
346 DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
351 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
353 switch (ses->aead_alg) {
354 case RTE_CRYPTO_AEAD_AES_GCM:
355 alginfo->algtype = OP_ALG_ALGSEL_AES;
356 alginfo->algmode = OP_ALG_AAI_GCM;
359 DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
364 /* prepare command block of the session */
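/* The CDB holds the CAAM shared descriptor of the session. Depending on
 * the session type it is built with the matching RTA cnstr_shdsc_*()
 * helper (block cipher, HMAC, GCM, authenc or IPsec protocol
 * descriptor) and its length is recorded in the CDB header.
 */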
366 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
368 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
369 int32_t shared_desc_len = 0;
370 struct sec_cdb *cdb = &ses->cdb;
372 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
378 memset(cdb, 0, sizeof(struct sec_cdb));
380 if (is_cipher_only(ses)) {
381 caam_cipher_alg(ses, &alginfo_c);
382 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
383 DPAA_SEC_ERR("not supported cipher alg");
387 alginfo_c.key = (size_t)ses->cipher_key.data;
388 alginfo_c.keylen = ses->cipher_key.length;
389 alginfo_c.key_enc_flags = 0;
390 alginfo_c.key_type = RTA_DATA_IMM;
392 shared_desc_len = cnstr_shdsc_blkcipher(
398 } else if (is_auth_only(ses)) {
399 caam_auth_alg(ses, &alginfo_a);
400 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
401 DPAA_SEC_ERR("not supported auth alg");
405 alginfo_a.key = (size_t)ses->auth_key.data;
406 alginfo_a.keylen = ses->auth_key.length;
407 alginfo_a.key_enc_flags = 0;
408 alginfo_a.key_type = RTA_DATA_IMM;
410 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
414 } else if (is_aead(ses)) {
415 caam_aead_alg(ses, &alginfo);
416 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
417 DPAA_SEC_ERR("not supported aead alg");
420 alginfo.key = (size_t)ses->aead_key.data;
421 alginfo.keylen = ses->aead_key.length;
422 alginfo.key_enc_flags = 0;
423 alginfo.key_type = RTA_DATA_IMM;
425 if (ses->dir == DIR_ENC)
426 shared_desc_len = cnstr_shdsc_gcm_encap(
427 cdb->sh_desc, true, swap,
432 shared_desc_len = cnstr_shdsc_gcm_decap(
433 cdb->sh_desc, true, swap,
438 caam_cipher_alg(ses, &alginfo_c);
439 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
440 DPAA_SEC_ERR("not supported cipher alg");
444 alginfo_c.key = (size_t)ses->cipher_key.data;
445 alginfo_c.keylen = ses->cipher_key.length;
446 alginfo_c.key_enc_flags = 0;
447 alginfo_c.key_type = RTA_DATA_IMM;
449 caam_auth_alg(ses, &alginfo_a);
450 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
451 DPAA_SEC_ERR("not supported auth alg");
455 alginfo_a.key = (size_t)ses->auth_key.data;
456 alginfo_a.keylen = ses->auth_key.length;
457 alginfo_a.key_enc_flags = 0;
458 alginfo_a.key_type = RTA_DATA_IMM;
460 cdb->sh_desc[0] = alginfo_c.keylen;
461 cdb->sh_desc[1] = alginfo_a.keylen;
462 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
464 (unsigned int *)cdb->sh_desc,
465 &cdb->sh_desc[2], 2);
468 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
471 if (cdb->sh_desc[2] & 1)
472 alginfo_c.key_type = RTA_DATA_IMM;
474 alginfo_c.key = (size_t)dpaa_mem_vtop(
475 (void *)(size_t)alginfo_c.key);
476 alginfo_c.key_type = RTA_DATA_PTR;
478 if (cdb->sh_desc[2] & (1<<1))
479 alginfo_a.key_type = RTA_DATA_IMM;
481 alginfo_a.key = (size_t)dpaa_mem_vtop(
482 (void *)(size_t)alginfo_a.key);
483 alginfo_a.key_type = RTA_DATA_PTR;
488 if (is_proto_ipsec(ses)) {
489 if (ses->dir == DIR_ENC) {
490 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
492 true, swap, SHR_SERIAL,
494 (uint8_t *)&ses->ip4_hdr,
495 &alginfo_c, &alginfo_a);
496 } else if (ses->dir == DIR_DEC) {
497 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
499 true, swap, SHR_SERIAL,
501 &alginfo_c, &alginfo_a);
504		/* auth_only_len is set to 0 here; it is
505		 * overwritten in the fd for each packet.
507 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
508 true, swap, &alginfo_c, &alginfo_a,
510 ses->digest_length, ses->dir);
514 if (shared_desc_len < 0) {
515 DPAA_SEC_ERR("error in preparing command block");
516 return shared_desc_len;
519 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
520 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
521 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
526 /* qp is lockless, should be accessed by only one thread */
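/* Pull completed frames from the queue pair's out FQ with a volatile
 * dequeue, convert each compound FD back into its crypto op and set the
 * op status from the SEC frame status.
 */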
528 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
531 unsigned int pkts = 0;
532 int num_rx_bufs, ret;
533 struct qm_dqrr_entry *dq;
534 uint32_t vdqcr_flags = 0;
538	 * For small requests we set the QM_VDQCR_EXACT flag so that exactly
539	 * the requested number of buffers is provided.
540	 * Without QM_VDQCR_EXACT the dequeue can return up to two more
541	 * buffers than requested, so we request two fewer in that case.
544 vdqcr_flags = QM_VDQCR_EXACT;
545 num_rx_bufs = nb_ops;
547 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
548 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
550 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
555 const struct qm_fd *fd;
556 struct dpaa_sec_job *job;
557 struct dpaa_sec_op_ctx *ctx;
558 struct rte_crypto_op *op;
560 dq = qman_dequeue(fq);
565 /* sg is embedded in an op ctx,
566 * sg[0] is for output
569 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
571 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
572 ctx->fd_status = fd->status;
574 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
575 struct qm_sg_entry *sg_out;
578 sg_out = &job->sg[0];
579 hw_sg_to_cpu(sg_out);
580 len = sg_out->length;
581 op->sym->m_src->pkt_len = len;
582 op->sym->m_src->data_len = len;
584 if (!ctx->fd_status) {
585 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
587 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
588 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
592		/* report op status to sym->op and then free the ctx memory */
593 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
595 qman_dqrr_consume(fq, dq);
596 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
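/* Build an auth-only compound frame for a scattered (multi-segment)
 * mbuf: sg[0] points at the digest output, sg[1] is an extension SG
 * list covering the data to authenticate plus, when verifying, a copy
 * of the received digest.
 */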
601 static inline struct dpaa_sec_job *
602 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
604 struct rte_crypto_sym_op *sym = op->sym;
605 struct rte_mbuf *mbuf = sym->m_src;
606 struct dpaa_sec_job *cf;
607 struct dpaa_sec_op_ctx *ctx;
608 struct qm_sg_entry *sg, *out_sg, *in_sg;
609 phys_addr_t start_addr;
610 uint8_t *old_digest, extra_segs;
617 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
618 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
622 ctx = dpaa_sec_alloc_ctx(ses);
628 old_digest = ctx->digest;
632 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
633 out_sg->length = ses->digest_length;
634 cpu_to_hw_sg(out_sg);
638 /* need to extend the input to a compound frame */
639 in_sg->extension = 1;
641 in_sg->length = sym->auth.data.length;
642 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
646 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
647 sg->length = mbuf->data_len - sym->auth.data.offset;
648 sg->offset = sym->auth.data.offset;
650 /* Successive segs */
655 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
656 sg->length = mbuf->data_len;
660 if (is_decode(ses)) {
661 /* Digest verification case */
664 rte_memcpy(old_digest, sym->auth.digest.data,
666 start_addr = dpaa_mem_vtop(old_digest);
667 qm_sg_entry_set64(sg, start_addr);
668 sg->length = ses->digest_length;
669 in_sg->length += ses->digest_length;
671 /* Digest calculation case */
672 sg->length -= ses->digest_length;
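/* Auth-only case for contiguous mbufs: the output SG is the digest
 * buffer; the input covers the data to authenticate and, when
 * verifying, a copy of the received digest so the hardware can check it.
 */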
683 * |<----data_len------->|
684 * |ip_header|ah_header|icv|payload|
689 static inline struct dpaa_sec_job *
690 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
692 struct rte_crypto_sym_op *sym = op->sym;
693 struct rte_mbuf *mbuf = sym->m_src;
694 struct dpaa_sec_job *cf;
695 struct dpaa_sec_op_ctx *ctx;
696 struct qm_sg_entry *sg;
697 rte_iova_t start_addr;
700 ctx = dpaa_sec_alloc_ctx(ses);
706 old_digest = ctx->digest;
708 start_addr = rte_pktmbuf_iova(mbuf);
711 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
712 sg->length = ses->digest_length;
717 if (is_decode(ses)) {
718 /* need to extend the input to a compound frame */
720 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
721 sg->length = sym->auth.data.length + ses->digest_length;
726 /* hash result or digest, save digest first */
727 rte_memcpy(old_digest, sym->auth.digest.data,
729 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
730 sg->length = sym->auth.data.length;
733		/* let the hardware verify the digest */
734 start_addr = dpaa_mem_vtop(old_digest);
736 qm_sg_entry_set64(sg, start_addr);
737 sg->length = ses->digest_length;
741 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
742 sg->length = sym->auth.data.length;
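/* Cipher-only compound frame for scattered mbufs: the output SG list
 * walks the destination mbuf chain (the source chain for in-place ops);
 * the input SG list is the IV followed by the source mbuf chain.
 */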
750 static inline struct dpaa_sec_job *
751 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
753 struct rte_crypto_sym_op *sym = op->sym;
754 struct dpaa_sec_job *cf;
755 struct dpaa_sec_op_ctx *ctx;
756 struct qm_sg_entry *sg, *out_sg, *in_sg;
757 struct rte_mbuf *mbuf;
759 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
764 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
767 req_segs = mbuf->nb_segs * 2 + 3;
770 if (req_segs > MAX_SG_ENTRIES) {
771 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
776 ctx = dpaa_sec_alloc_ctx(ses);
785 out_sg->extension = 1;
786 out_sg->length = sym->cipher.data.length;
787 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
788 cpu_to_hw_sg(out_sg);
792 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
793 sg->length = mbuf->data_len - sym->cipher.data.offset;
794 sg->offset = sym->cipher.data.offset;
796 /* Successive segs */
801 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
802 sg->length = mbuf->data_len;
811 in_sg->extension = 1;
813 in_sg->length = sym->cipher.data.length + ses->iv.length;
816 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
820 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
821 sg->length = ses->iv.length;
826 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
827 sg->length = mbuf->data_len - sym->cipher.data.offset;
828 sg->offset = sym->cipher.data.offset;
830 /* Successive segs */
835 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
836 sg->length = mbuf->data_len;
845 static inline struct dpaa_sec_job *
846 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
848 struct rte_crypto_sym_op *sym = op->sym;
849 struct dpaa_sec_job *cf;
850 struct dpaa_sec_op_ctx *ctx;
851 struct qm_sg_entry *sg;
852 rte_iova_t src_start_addr, dst_start_addr;
853 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
856 ctx = dpaa_sec_alloc_ctx(ses);
863 src_start_addr = rte_pktmbuf_iova(sym->m_src);
866 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
868 dst_start_addr = src_start_addr;
872 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
873 sg->length = sym->cipher.data.length + ses->iv.length;
879 /* need to extend the input to a compound frame */
882 sg->length = sym->cipher.data.length + ses->iv.length;
883 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
887 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
888 sg->length = ses->iv.length;
892 qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
893 sg->length = sym->cipher.data.length;
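/* AEAD (GCM) compound frame for scattered mbufs: the output covers the
 * destination data (with the digest appended on encrypt); the input is
 * the IV, optional AAD, the source data and, on decrypt, a copy of the
 * received digest for verification.
 */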
900 static inline struct dpaa_sec_job *
901 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
903 struct rte_crypto_sym_op *sym = op->sym;
904 struct dpaa_sec_job *cf;
905 struct dpaa_sec_op_ctx *ctx;
906 struct qm_sg_entry *sg, *out_sg, *in_sg;
907 struct rte_mbuf *mbuf;
909 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
914 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
917 req_segs = mbuf->nb_segs * 2 + 4;
920 if (ses->auth_only_len)
923 if (req_segs > MAX_SG_ENTRIES) {
924 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
929 ctx = dpaa_sec_alloc_ctx(ses);
936 rte_prefetch0(cf->sg);
940 out_sg->extension = 1;
942 out_sg->length = sym->aead.data.length + ses->auth_only_len
943 + ses->digest_length;
945 out_sg->length = sym->aead.data.length + ses->auth_only_len;
947 /* output sg entries */
949 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
950 cpu_to_hw_sg(out_sg);
953 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
954 sg->length = mbuf->data_len - sym->aead.data.offset +
956 sg->offset = sym->aead.data.offset - ses->auth_only_len;
958 /* Successive segs */
963 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
964 sg->length = mbuf->data_len;
967 sg->length -= ses->digest_length;
969 if (is_encode(ses)) {
971 /* set auth output */
973 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
974 sg->length = ses->digest_length;
982 in_sg->extension = 1;
985 in_sg->length = ses->iv.length + sym->aead.data.length
986 + ses->auth_only_len;
988 in_sg->length = ses->iv.length + sym->aead.data.length
989 + ses->auth_only_len + ses->digest_length;
991 /* input sg entries */
993 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
997 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
998 sg->length = ses->iv.length;
1001 /* 2nd seg auth only */
1002 if (ses->auth_only_len) {
1004 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1005 sg->length = ses->auth_only_len;
1011 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1012 sg->length = mbuf->data_len - sym->aead.data.offset;
1013 sg->offset = sym->aead.data.offset;
1015 /* Successive segs */
1020 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1021 sg->length = mbuf->data_len;
1025 if (is_decode(ses)) {
1028 memcpy(ctx->digest, sym->aead.digest.data,
1029 ses->digest_length);
1030 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1031 sg->length = ses->digest_length;
1039 static inline struct dpaa_sec_job *
1040 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1042 struct rte_crypto_sym_op *sym = op->sym;
1043 struct dpaa_sec_job *cf;
1044 struct dpaa_sec_op_ctx *ctx;
1045 struct qm_sg_entry *sg;
1046 uint32_t length = 0;
1047 rte_iova_t src_start_addr, dst_start_addr;
1048 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1051 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1054 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1056 dst_start_addr = src_start_addr;
1058 ctx = dpaa_sec_alloc_ctx(ses);
1066 rte_prefetch0(cf->sg);
1068 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1069 if (is_encode(ses)) {
1070 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1071 sg->length = ses->iv.length;
1072 length += sg->length;
1076 if (ses->auth_only_len) {
1077 qm_sg_entry_set64(sg,
1078 dpaa_mem_vtop(sym->aead.aad.data));
1079 sg->length = ses->auth_only_len;
1080 length += sg->length;
1084 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1085 sg->length = sym->aead.data.length;
1086 length += sg->length;
1090 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1091 sg->length = ses->iv.length;
1092 length += sg->length;
1096 if (ses->auth_only_len) {
1097 qm_sg_entry_set64(sg,
1098 dpaa_mem_vtop(sym->aead.aad.data));
1099 sg->length = ses->auth_only_len;
1100 length += sg->length;
1104 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1105 sg->length = sym->aead.data.length;
1106 length += sg->length;
1109 memcpy(ctx->digest, sym->aead.digest.data,
1110 ses->digest_length);
1113 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1114 sg->length = ses->digest_length;
1115 length += sg->length;
1119 /* input compound frame */
1120 cf->sg[1].length = length;
1121 cf->sg[1].extension = 1;
1122 cf->sg[1].final = 1;
1123 cpu_to_hw_sg(&cf->sg[1]);
1127 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1128 qm_sg_entry_set64(sg,
1129 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1130 sg->length = sym->aead.data.length + ses->auth_only_len;
1131 length = sg->length;
1132 if (is_encode(ses)) {
1134 /* set auth output */
1136 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1137 sg->length = ses->digest_length;
1138 length += sg->length;
1143 /* output compound frame */
1144 cf->sg[0].length = length;
1145 cf->sg[0].extension = 1;
1146 cpu_to_hw_sg(&cf->sg[0]);
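/* Chained cipher+auth compound frame for scattered mbufs: the output
 * covers the authenticated region (with the digest appended on
 * encrypt); the input is the IV, the source data and, on decrypt, a
 * copy of the received digest.
 */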
1151 static inline struct dpaa_sec_job *
1152 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1154 struct rte_crypto_sym_op *sym = op->sym;
1155 struct dpaa_sec_job *cf;
1156 struct dpaa_sec_op_ctx *ctx;
1157 struct qm_sg_entry *sg, *out_sg, *in_sg;
1158 struct rte_mbuf *mbuf;
1160 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1165 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1168 req_segs = mbuf->nb_segs * 2 + 4;
1171 if (req_segs > MAX_SG_ENTRIES) {
1172 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1177 ctx = dpaa_sec_alloc_ctx(ses);
1184 rte_prefetch0(cf->sg);
1187 out_sg = &cf->sg[0];
1188 out_sg->extension = 1;
1190 out_sg->length = sym->auth.data.length + ses->digest_length;
1192 out_sg->length = sym->auth.data.length;
1194 /* output sg entries */
1196 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1197 cpu_to_hw_sg(out_sg);
1200 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1201 sg->length = mbuf->data_len - sym->auth.data.offset;
1202 sg->offset = sym->auth.data.offset;
1204 /* Successive segs */
1209 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1210 sg->length = mbuf->data_len;
1213 sg->length -= ses->digest_length;
1215 if (is_encode(ses)) {
1217 /* set auth output */
1219 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1220 sg->length = ses->digest_length;
1228 in_sg->extension = 1;
1231 in_sg->length = ses->iv.length + sym->auth.data.length;
1233 in_sg->length = ses->iv.length + sym->auth.data.length
1234 + ses->digest_length;
1236 /* input sg entries */
1238 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1239 cpu_to_hw_sg(in_sg);
1242 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1243 sg->length = ses->iv.length;
1248 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1249 sg->length = mbuf->data_len - sym->auth.data.offset;
1250 sg->offset = sym->auth.data.offset;
1252 /* Successive segs */
1257 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1258 sg->length = mbuf->data_len;
1262 sg->length -= ses->digest_length;
1263 if (is_decode(ses)) {
1266 memcpy(ctx->digest, sym->auth.digest.data,
1267 ses->digest_length);
1268 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1269 sg->length = ses->digest_length;
1277 static inline struct dpaa_sec_job *
1278 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1280 struct rte_crypto_sym_op *sym = op->sym;
1281 struct dpaa_sec_job *cf;
1282 struct dpaa_sec_op_ctx *ctx;
1283 struct qm_sg_entry *sg;
1284 rte_iova_t src_start_addr, dst_start_addr;
1285 uint32_t length = 0;
1286 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1289 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1291 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1293 dst_start_addr = src_start_addr;
1295 ctx = dpaa_sec_alloc_ctx(ses);
1303 rte_prefetch0(cf->sg);
1305 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1306 if (is_encode(ses)) {
1307 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1308 sg->length = ses->iv.length;
1309 length += sg->length;
1313 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1314 sg->length = sym->auth.data.length;
1315 length += sg->length;
1319 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1320 sg->length = ses->iv.length;
1321 length += sg->length;
1326 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1327 sg->length = sym->auth.data.length;
1328 length += sg->length;
1331 memcpy(ctx->digest, sym->auth.digest.data,
1332 ses->digest_length);
1335 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1336 sg->length = ses->digest_length;
1337 length += sg->length;
1341 /* input compound frame */
1342 cf->sg[1].length = length;
1343 cf->sg[1].extension = 1;
1344 cf->sg[1].final = 1;
1345 cpu_to_hw_sg(&cf->sg[1]);
1349 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1350 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1351 sg->length = sym->cipher.data.length;
1352 length = sg->length;
1353 if (is_encode(ses)) {
1355 /* set auth output */
1357 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1358 sg->length = ses->digest_length;
1359 length += sg->length;
1364 /* output compound frame */
1365 cf->sg[0].length = length;
1366 cf->sg[0].extension = 1;
1367 cpu_to_hw_sg(&cf->sg[0]);
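/* IPsec protocol offload: a simple compound frame where the input is
 * the whole source packet and the output is the destination buffer;
 * the IPsec protocol shared descriptor performs the encap/decap.
 */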
1372 static inline struct dpaa_sec_job *
1373 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1375 struct rte_crypto_sym_op *sym = op->sym;
1376 struct dpaa_sec_job *cf;
1377 struct dpaa_sec_op_ctx *ctx;
1378 struct qm_sg_entry *sg;
1379 phys_addr_t src_start_addr, dst_start_addr;
1381 ctx = dpaa_sec_alloc_ctx(ses);
1387 src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1390 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1392 dst_start_addr = src_start_addr;
1396 qm_sg_entry_set64(sg, src_start_addr);
1397 sg->length = sym->m_src->pkt_len;
1401 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1404 qm_sg_entry_set64(sg, dst_start_addr);
1405 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1412 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1415	/* Transmit the frames to the given device and queue pair */
1417 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1418 uint16_t num_tx = 0;
1419 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1420 uint32_t frames_to_send;
1421 struct rte_crypto_op *op;
1422 struct dpaa_sec_job *cf;
1423 dpaa_sec_session *ses;
1424 uint32_t auth_only_len;
1425 struct qman_fq *inq[DPAA_SEC_BURST];
1428 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1429 DPAA_SEC_BURST : nb_ops;
1430 for (loop = 0; loop < frames_to_send; loop++) {
1432 switch (op->sess_type) {
1433 case RTE_CRYPTO_OP_WITH_SESSION:
1434 ses = (dpaa_sec_session *)
1435 get_sym_session_private_data(
1437 cryptodev_driver_id);
1439 case RTE_CRYPTO_OP_SECURITY_SESSION:
1440 ses = (dpaa_sec_session *)
1441 get_sec_session_private_data(
1442 op->sym->sec_session);
1446 "sessionless crypto op not supported");
1447 frames_to_send = loop;
1451 if (unlikely(!ses->qp || ses->qp != qp)) {
1452 DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
1454 if (dpaa_sec_attach_sess_q(qp, ses)) {
1455 frames_to_send = loop;
1461 auth_only_len = op->sym->auth.data.length -
1462 op->sym->cipher.data.length;
1463 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1464 if (is_auth_only(ses)) {
1465 cf = build_auth_only(op, ses);
1466 } else if (is_cipher_only(ses)) {
1467 cf = build_cipher_only(op, ses);
1468 } else if (is_aead(ses)) {
1469 cf = build_cipher_auth_gcm(op, ses);
1470 auth_only_len = ses->auth_only_len;
1471 } else if (is_auth_cipher(ses)) {
1472 cf = build_cipher_auth(op, ses);
1473 } else if (is_proto_ipsec(ses)) {
1474 cf = build_proto(op, ses);
1476 DPAA_SEC_DP_ERR("not supported ops");
1477 frames_to_send = loop;
1482 if (is_auth_only(ses)) {
1483 cf = build_auth_only_sg(op, ses);
1484 } else if (is_cipher_only(ses)) {
1485 cf = build_cipher_only_sg(op, ses);
1486 } else if (is_aead(ses)) {
1487 cf = build_cipher_auth_gcm_sg(op, ses);
1488 auth_only_len = ses->auth_only_len;
1489 } else if (is_auth_cipher(ses)) {
1490 cf = build_cipher_auth_sg(op, ses);
1492 DPAA_SEC_DP_ERR("not supported ops");
1493 frames_to_send = loop;
1498 if (unlikely(!cf)) {
1499 frames_to_send = loop;
1505 inq[loop] = ses->inq;
1506 fd->opaque_addr = 0;
1508 qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1509 fd->_format1 = qm_fd_compound;
1510 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1511			/* auth_only_len is set to 0 in the descriptor and is
1512			 * overwritten per packet here via fd->cmd.
1516 fd->cmd = 0x80000000 | auth_only_len;
1521 while (loop < frames_to_send) {
1522 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1523 frames_to_send - loop);
1525 nb_ops -= frames_to_send;
1526 num_tx += frames_to_send;
1529 dpaa_qp->tx_pkts += num_tx;
1530 dpaa_qp->tx_errs += nb_ops - num_tx;
1536 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1540 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1542 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1544 dpaa_qp->rx_pkts += num_rx;
1545 dpaa_qp->rx_errs += nb_ops - num_rx;
1547 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1552 /** Release queue pair */
1554 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1557 struct dpaa_sec_dev_private *internals;
1558 struct dpaa_sec_qp *qp = NULL;
1560 PMD_INIT_FUNC_TRACE();
1562 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1564 internals = dev->data->dev_private;
1565 if (qp_id >= internals->max_nb_queue_pairs) {
1566 DPAA_SEC_ERR("Max supported qpid %d",
1567 internals->max_nb_queue_pairs);
1571 qp = &internals->qps[qp_id];
1572 qp->internals = NULL;
1573 dev->data->queue_pairs[qp_id] = NULL;
1578 /** Setup a queue pair */
1580 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1581 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1582 __rte_unused int socket_id,
1583 __rte_unused struct rte_mempool *session_pool)
1585 struct dpaa_sec_dev_private *internals;
1586 struct dpaa_sec_qp *qp = NULL;
1588 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1590 internals = dev->data->dev_private;
1591 if (qp_id >= internals->max_nb_queue_pairs) {
1592 DPAA_SEC_ERR("Max supported qpid %d",
1593 internals->max_nb_queue_pairs);
1597 qp = &internals->qps[qp_id];
1598 qp->internals = internals;
1599 dev->data->queue_pairs[qp_id] = qp;
1604 /** Return the number of allocated queue pairs */
1606 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1608 PMD_INIT_FUNC_TRACE();
1610 return dev->data->nb_queue_pairs;
1613	/** Return the size of the session structure */
1615 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1617 PMD_INIT_FUNC_TRACE();
1619 return sizeof(dpaa_sec_session);
1623 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1624 struct rte_crypto_sym_xform *xform,
1625 dpaa_sec_session *session)
1627 session->cipher_alg = xform->cipher.algo;
1628 session->iv.length = xform->cipher.iv.length;
1629 session->iv.offset = xform->cipher.iv.offset;
1630 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1631 RTE_CACHE_LINE_SIZE);
1632 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1633 DPAA_SEC_ERR("No Memory for cipher key");
1636 session->cipher_key.length = xform->cipher.key.length;
1638 memcpy(session->cipher_key.data, xform->cipher.key.data,
1639 xform->cipher.key.length);
1640 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1647 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1648 struct rte_crypto_sym_xform *xform,
1649 dpaa_sec_session *session)
1651 session->auth_alg = xform->auth.algo;
1652 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1653 RTE_CACHE_LINE_SIZE);
1654 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1655 DPAA_SEC_ERR("No Memory for auth key");
1658 session->auth_key.length = xform->auth.key.length;
1659 session->digest_length = xform->auth.digest_length;
1661 memcpy(session->auth_key.data, xform->auth.key.data,
1662 xform->auth.key.length);
1663 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1670 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1671 struct rte_crypto_sym_xform *xform,
1672 dpaa_sec_session *session)
1674 session->aead_alg = xform->aead.algo;
1675 session->iv.length = xform->aead.iv.length;
1676 session->iv.offset = xform->aead.iv.offset;
1677 session->auth_only_len = xform->aead.aad_length;
1678 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1679 RTE_CACHE_LINE_SIZE);
1680 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1681 DPAA_SEC_ERR("No Memory for aead key\n");
1684 session->aead_key.length = xform->aead.key.length;
1685 session->digest_length = xform->aead.digest_length;
1687 memcpy(session->aead_key.data, xform->aead.key.data,
1688 xform->aead.key.length);
1689 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1695 static struct qman_fq *
1696 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1700 for (i = 0; i < qi->max_nb_sessions; i++) {
1701 if (qi->inq_attach[i] == 0) {
1702 qi->inq_attach[i] = 1;
1706	DPAA_SEC_WARN("All sessions in use (%x)", qi->max_nb_sessions);
1712 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1716 for (i = 0; i < qi->max_nb_sessions; i++) {
1717 if (&qi->inq[i] == fq) {
1718 qman_retire_fq(fq, NULL);
1720 qi->inq_attach[i] = 0;
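/* Prepare the session CDB and schedule the session's ingress FQ towards
 * CAAM, with results delivered to the queue pair's out FQ.
 */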
1728 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1733 ret = dpaa_sec_prep_cdb(sess);
1735 DPAA_SEC_ERR("Unable to prepare sec cdb");
1738 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1739 ret = rte_dpaa_portal_init((void *)0);
1741 DPAA_SEC_ERR("Failure in affining portal");
1745 ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1746 qman_fq_fqid(&qp->outq));
1748 DPAA_SEC_ERR("Unable to init sec queue");
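/* Parse the symmetric xform chain (cipher only, auth only, cipher+auth
 * or AEAD), fill in the session and attach an ingress FQ to it.
 */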
1754 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1755 struct rte_crypto_sym_xform *xform, void *sess)
1757 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1758 dpaa_sec_session *session = sess;
1760 PMD_INIT_FUNC_TRACE();
1762 if (unlikely(sess == NULL)) {
1763 DPAA_SEC_ERR("invalid session struct");
1766 memset(session, 0, sizeof(dpaa_sec_session));
1768 /* Default IV length = 0 */
1769 session->iv.length = 0;
1772 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1773 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1774 dpaa_sec_cipher_init(dev, xform, session);
1776 /* Authentication Only */
1777 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1778 xform->next == NULL) {
1779 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1780 dpaa_sec_auth_init(dev, xform, session);
1782 /* Cipher then Authenticate */
1783 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1784 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1785 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1786 dpaa_sec_cipher_init(dev, xform, session);
1787 dpaa_sec_auth_init(dev, xform->next, session);
1789 DPAA_SEC_ERR("Not supported: Auth then Cipher");
1793 /* Authenticate then Cipher */
1794 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1795 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1796 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1797 dpaa_sec_auth_init(dev, xform, session);
1798 dpaa_sec_cipher_init(dev, xform->next, session);
1800 DPAA_SEC_ERR("Not supported: Auth then Cipher");
1804	/* AEAD operation for AES-GCM type algorithms */
1805 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1806 xform->next == NULL) {
1807 dpaa_sec_aead_init(dev, xform, session);
1810 DPAA_SEC_ERR("Invalid crypto type");
1813 session->ctx_pool = internals->ctx_pool;
1814 rte_spinlock_lock(&internals->lock);
1815 session->inq = dpaa_sec_attach_rxq(internals);
1816 rte_spinlock_unlock(&internals->lock);
1817 if (session->inq == NULL) {
1818 DPAA_SEC_ERR("unable to attach sec queue");
1825 rte_free(session->cipher_key.data);
1826 rte_free(session->auth_key.data);
1827 memset(session, 0, sizeof(dpaa_sec_session));
1833 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
1834 struct rte_crypto_sym_xform *xform,
1835 struct rte_cryptodev_sym_session *sess,
1836 struct rte_mempool *mempool)
1838 void *sess_private_data;
1841 PMD_INIT_FUNC_TRACE();
1843 if (rte_mempool_get(mempool, &sess_private_data)) {
1844 DPAA_SEC_ERR("Couldn't get object from session mempool");
1848 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1850 DPAA_SEC_ERR("failed to configure session parameters");
1852 /* Return session to mempool */
1853 rte_mempool_put(mempool, sess_private_data);
1857 set_sym_session_private_data(sess, dev->driver_id,
1864 /** Clear the memory of session so it doesn't leave key material behind */
1866 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
1867 struct rte_cryptodev_sym_session *sess)
1869 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1870 uint8_t index = dev->driver_id;
1871 void *sess_priv = get_sym_session_private_data(sess, index);
1873 PMD_INIT_FUNC_TRACE();
1875 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1878 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1881 dpaa_sec_detach_rxq(qi, s->inq);
1882 rte_free(s->cipher_key.data);
1883 rte_free(s->auth_key.data);
1884 memset(s, 0, sizeof(dpaa_sec_session));
1885 set_sym_session_private_data(sess, index, NULL);
1886 rte_mempool_put(sess_mp, sess_priv);
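/* Build an IPsec protocol offload session: copy the cipher/auth keys
 * and, for egress, a prebuilt IPv4 tunnel header plus the encap PDB;
 * for ingress, the decap PDB.
 */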
1891 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
1892 struct rte_security_session_conf *conf,
1895 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1896 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1897 struct rte_crypto_auth_xform *auth_xform;
1898 struct rte_crypto_cipher_xform *cipher_xform;
1899 dpaa_sec_session *session = (dpaa_sec_session *)sess;
1901 PMD_INIT_FUNC_TRACE();
1903 memset(session, 0, sizeof(dpaa_sec_session));
1904 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1905 cipher_xform = &conf->crypto_xform->cipher;
1906 auth_xform = &conf->crypto_xform->next->auth;
1908 auth_xform = &conf->crypto_xform->auth;
1909 cipher_xform = &conf->crypto_xform->next->cipher;
1911 session->proto_alg = conf->protocol;
1912 session->cipher_key.data = rte_zmalloc(NULL,
1913 cipher_xform->key.length,
1914 RTE_CACHE_LINE_SIZE);
1915 if (session->cipher_key.data == NULL &&
1916 cipher_xform->key.length > 0) {
1917 DPAA_SEC_ERR("No Memory for cipher key");
1921 session->cipher_key.length = cipher_xform->key.length;
1922 session->auth_key.data = rte_zmalloc(NULL,
1923 auth_xform->key.length,
1924 RTE_CACHE_LINE_SIZE);
1925 if (session->auth_key.data == NULL &&
1926 auth_xform->key.length > 0) {
1927 DPAA_SEC_ERR("No Memory for auth key");
1928 rte_free(session->cipher_key.data);
1931 session->auth_key.length = auth_xform->key.length;
1932 memcpy(session->cipher_key.data, cipher_xform->key.data,
1933 cipher_xform->key.length);
1934 memcpy(session->auth_key.data, auth_xform->key.data,
1935 auth_xform->key.length);
1937 switch (auth_xform->algo) {
1938 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1939 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1941 case RTE_CRYPTO_AUTH_MD5_HMAC:
1942 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1944 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1945 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1947 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1948 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1950 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1951 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1953 case RTE_CRYPTO_AUTH_AES_CMAC:
1954 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1956 case RTE_CRYPTO_AUTH_NULL:
1957 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1959 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1960 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1961 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1962 case RTE_CRYPTO_AUTH_SHA1:
1963 case RTE_CRYPTO_AUTH_SHA256:
1964 case RTE_CRYPTO_AUTH_SHA512:
1965 case RTE_CRYPTO_AUTH_SHA224:
1966 case RTE_CRYPTO_AUTH_SHA384:
1967 case RTE_CRYPTO_AUTH_MD5:
1968 case RTE_CRYPTO_AUTH_AES_GMAC:
1969 case RTE_CRYPTO_AUTH_KASUMI_F9:
1970 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1971 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1972 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
1976 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
1981 switch (cipher_xform->algo) {
1982 case RTE_CRYPTO_CIPHER_AES_CBC:
1983 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1985 case RTE_CRYPTO_CIPHER_3DES_CBC:
1986 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1988 case RTE_CRYPTO_CIPHER_AES_CTR:
1989 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1991 case RTE_CRYPTO_CIPHER_NULL:
1992 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1993 case RTE_CRYPTO_CIPHER_3DES_ECB:
1994 case RTE_CRYPTO_CIPHER_AES_ECB:
1995 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1996 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1997 cipher_xform->algo);
2000 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2001 cipher_xform->algo);
2005 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2006 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2007 sizeof(session->ip4_hdr));
2008 session->ip4_hdr.ip_v = IPVERSION;
2009 session->ip4_hdr.ip_hl = 5;
2010 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2011 sizeof(session->ip4_hdr));
2012 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2013 session->ip4_hdr.ip_id = 0;
2014 session->ip4_hdr.ip_off = 0;
2015 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2016 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2017 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2019 session->ip4_hdr.ip_sum = 0;
2020 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2021 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2022 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2023 (void *)&session->ip4_hdr,
2026 session->encap_pdb.options =
2027 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2028 PDBOPTS_ESP_OIHI_PDB_INL |
2030 PDBHMO_ESP_ENCAP_DTTL;
2031 session->encap_pdb.spi = ipsec_xform->spi;
2032 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2034 session->dir = DIR_ENC;
2035 } else if (ipsec_xform->direction ==
2036 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2037 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2038 session->decap_pdb.options = sizeof(struct ip) << 16;
2039 session->dir = DIR_DEC;
2042 session->ctx_pool = internals->ctx_pool;
2043 rte_spinlock_lock(&internals->lock);
2044 session->inq = dpaa_sec_attach_rxq(internals);
2045 rte_spinlock_unlock(&internals->lock);
2046 if (session->inq == NULL) {
2047 DPAA_SEC_ERR("unable to attach sec queue");
2054 rte_free(session->auth_key.data);
2055 rte_free(session->cipher_key.data);
2056 memset(session, 0, sizeof(dpaa_sec_session));
2061 dpaa_sec_security_session_create(void *dev,
2062 struct rte_security_session_conf *conf,
2063 struct rte_security_session *sess,
2064 struct rte_mempool *mempool)
2066 void *sess_private_data;
2067 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2070 if (rte_mempool_get(mempool, &sess_private_data)) {
2071 DPAA_SEC_ERR("Couldn't get object from session mempool");
2075 switch (conf->protocol) {
2076 case RTE_SECURITY_PROTOCOL_IPSEC:
2077 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2080 case RTE_SECURITY_PROTOCOL_MACSEC:
2086 DPAA_SEC_ERR("failed to configure session parameters");
2087 /* Return session to mempool */
2088 rte_mempool_put(mempool, sess_private_data);
2092 set_sec_session_private_data(sess, sess_private_data);
2097 /** Clear the memory of session so it doesn't leave key material behind */
2099 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2100 struct rte_security_session *sess)
2102 PMD_INIT_FUNC_TRACE();
2103 void *sess_priv = get_sec_session_private_data(sess);
2105 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2108 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2110 rte_free(s->cipher_key.data);
2111 rte_free(s->auth_key.data);
2112 memset(sess, 0, sizeof(dpaa_sec_session));
2113 set_sec_session_private_data(sess, NULL);
2114 rte_mempool_put(sess_mp, sess_priv);
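/* Create the per-device op-context mempool on the first configure; it
 * is reused on subsequent reconfigurations.
 */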
2121 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2122 struct rte_cryptodev_config *config __rte_unused)
2126 struct dpaa_sec_dev_private *internals;
2128 PMD_INIT_FUNC_TRACE();
2130 internals = dev->data->dev_private;
2131 sprintf(str, "ctx_pool_%d", dev->data->dev_id);
2132 if (!internals->ctx_pool) {
2133 internals->ctx_pool = rte_mempool_create((const char *)str,
2136 CTX_POOL_CACHE_SIZE, 0,
2137 NULL, NULL, NULL, NULL,
2139 if (!internals->ctx_pool) {
2140 DPAA_SEC_ERR("%s create failed\n", str);
2144 DPAA_SEC_INFO("mempool already created for dev_id : %d",
2151 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2153 PMD_INIT_FUNC_TRACE();
2158 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2160 PMD_INIT_FUNC_TRACE();
2164 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2166 struct dpaa_sec_dev_private *internals;
2168 PMD_INIT_FUNC_TRACE();
2173 internals = dev->data->dev_private;
2174 rte_mempool_free(internals->ctx_pool);
2175 internals->ctx_pool = NULL;
2181 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2182 struct rte_cryptodev_info *info)
2184 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2186 PMD_INIT_FUNC_TRACE();
2188 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2189 info->feature_flags = dev->feature_flags;
2190 info->capabilities = dpaa_sec_capabilities;
2191 info->sym.max_nb_sessions = internals->max_nb_sessions;
2192 info->driver_id = cryptodev_driver_id;
2196 static struct rte_cryptodev_ops crypto_ops = {
2197 .dev_configure = dpaa_sec_dev_configure,
2198 .dev_start = dpaa_sec_dev_start,
2199 .dev_stop = dpaa_sec_dev_stop,
2200 .dev_close = dpaa_sec_dev_close,
2201 .dev_infos_get = dpaa_sec_dev_infos_get,
2202 .queue_pair_setup = dpaa_sec_queue_pair_setup,
2203 .queue_pair_release = dpaa_sec_queue_pair_release,
2204 .queue_pair_count = dpaa_sec_queue_pair_count,
2205 .sym_session_get_size = dpaa_sec_sym_session_get_size,
2206 .sym_session_configure = dpaa_sec_sym_session_configure,
2207 .sym_session_clear = dpaa_sec_sym_session_clear
2210 static const struct rte_security_capability *
2211 dpaa_sec_capabilities_get(void *device __rte_unused)
2213 return dpaa_sec_security_cap;
2216 struct rte_security_ops dpaa_sec_security_ops = {
2217 .session_create = dpaa_sec_security_session_create,
2218 .session_update = NULL,
2219 .session_stats_get = NULL,
2220 .session_destroy = dpaa_sec_security_session_destroy,
2221 .set_pkt_metadata = NULL,
2222 .capabilities_get = dpaa_sec_capabilities_get
2226 dpaa_sec_uninit(struct rte_cryptodev *dev)
2228 struct dpaa_sec_dev_private *internals;
2233 internals = dev->data->dev_private;
2234 rte_free(dev->security_ctx);
2236 /* In case close has been called, internals->ctx_pool would be NULL */
2237 rte_mempool_free(internals->ctx_pool);
2238 rte_free(internals);
2240 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2241 dev->data->name, rte_socket_id());
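/* Per-device initialisation: register the burst functions and feature
 * flags, create the security context, and set up an out FQ per queue
 * pair and an ingress FQ per session.
 */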
2247 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2249 struct dpaa_sec_dev_private *internals;
2250 struct rte_security_ctx *security_instance;
2251 struct dpaa_sec_qp *qp;
2255 PMD_INIT_FUNC_TRACE();
2257 cryptodev->driver_id = cryptodev_driver_id;
2258 cryptodev->dev_ops = &crypto_ops;
2260 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2261 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2262 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2263 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2264 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2265 RTE_CRYPTODEV_FF_SECURITY |
2266 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2267 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2268 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2269 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2270 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2272 internals = cryptodev->data->dev_private;
2273 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2274 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2277	 * For secondary processes, we don't initialise any further as the
2278	 * primary process has already done this work.
2281 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2282 DPAA_SEC_WARN("Device already init by primary process");
2286 /* Initialize security_ctx only for primary process*/
2287 security_instance = rte_malloc("rte_security_instances_ops",
2288 sizeof(struct rte_security_ctx), 0);
2289 if (security_instance == NULL)
2291 security_instance->device = (void *)cryptodev;
2292 security_instance->ops = &dpaa_sec_security_ops;
2293 security_instance->sess_cnt = 0;
2294 cryptodev->security_ctx = security_instance;
2296 rte_spinlock_init(&internals->lock);
2297 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2298 /* init qman fq for queue pair */
2299 qp = &internals->qps[i];
2300 ret = dpaa_sec_init_tx(&qp->outq);
2302 DPAA_SEC_ERR("config tx of queue pair %d", i);
2307 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2308 QMAN_FQ_FLAG_TO_DCPORTAL;
2309 for (i = 0; i < internals->max_nb_sessions; i++) {
2310		/* create rx qman fq for sessions */
2311 ret = qman_create_fq(0, flags, &internals->inq[i]);
2312 if (unlikely(ret != 0)) {
2313 DPAA_SEC_ERR("sec qman_create_fq failed");
2318 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2322 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
2324 dpaa_sec_uninit(cryptodev);
2329 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
2330 struct rte_dpaa_device *dpaa_dev)
2332 struct rte_cryptodev *cryptodev;
2333 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2337 sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);
2339 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2340 if (cryptodev == NULL)
2343 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2344 cryptodev->data->dev_private = rte_zmalloc_socket(
2345 "cryptodev private structure",
2346 sizeof(struct dpaa_sec_dev_private),
2347 RTE_CACHE_LINE_SIZE,
2350 if (cryptodev->data->dev_private == NULL)
2351 rte_panic("Cannot allocate memzone for private "
2355 dpaa_dev->crypto_dev = cryptodev;
2356 cryptodev->device = &dpaa_dev->device;
2358 /* init user callbacks */
2359 TAILQ_INIT(&(cryptodev->link_intr_cbs));
2361 /* if sec device version is not configured */
2362 if (!rta_get_sec_era()) {
2363 const struct device_node *caam_node;
2365 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2366 const uint32_t *prop = of_get_property(caam_node,
2371 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2377 /* Invoke PMD device initialization function */
2378 retval = dpaa_sec_dev_init(cryptodev);
2382 /* In case of error, cleanup is done */
2383 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2384 rte_free(cryptodev->data->dev_private);
2386 rte_cryptodev_pmd_release_device(cryptodev);
2392 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2394 struct rte_cryptodev *cryptodev;
2397 cryptodev = dpaa_dev->crypto_dev;
2398 if (cryptodev == NULL)
2401 ret = dpaa_sec_uninit(cryptodev);
2405 return rte_cryptodev_pmd_destroy(cryptodev);
2408 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2409 .drv_type = FSL_DPAA_CRYPTO,
2411 .name = "DPAA SEC PMD"
2413 .probe = cryptodev_dpaa_sec_probe,
2414 .remove = cryptodev_dpaa_sec_remove,
2417 static struct cryptodev_driver dpaa_sec_crypto_drv;
2419 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2420 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2421 cryptodev_driver_id);
2423 RTE_INIT(dpaa_sec_init_log)
2425 dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2426 if (dpaa_logtype_sec >= 0)
2427 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);