/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
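
/*
 * Scratch state for the poll-mode dequeue path: the DQRR callback
 * below stores completed ops into dpaa_sec_ops (up to DPAA_SEC_BURST
 * per volatile dequeue) on behalf of the thread that called
 * dpaa_sec_deq(), hence the per-thread storage.
 */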

static void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear the SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() runs once per
	 * packet, and dcbz_64() is cheaper than memset() on this path.
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (uint64_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
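
/*
 * dpaa_mem_vtop() walks the EAL memseg layout linearly on every call,
 * which is why dpaa_sec_alloc_ctx() caches the ctx's virt-to-IOVA
 * delta (vtop_offset) above: translations inside the ctx then reduce
 * to a single subtraction in dpaa_mem_vtop_ctx().
 */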

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (uint64_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].iova +
				(vaddr_64 - memseg[i].addr_64);

			return (rte_iova_t)paddr;
		}
	}
	return (rte_iova_t)(NULL);
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (uint64_t)vaddr - ctx->vtop_offset;
}

static void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].iova &&
		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
			return (void *)(memseg[i].addr_64 +
					(paddr - memseg[i].iova));
	}

	return NULL;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the dest chan as the caam chan so that
 * all packets in this queue are dispatched to the caam.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;

	ret = qman_create_fq(0, flags, fq_in);
	if (unlikely(ret != 0)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed");

	return ret;
}

/* Frames are enqueued on the in_fq and the caam puts the crypto result
 * into the out_fq.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx;
	 * sg[0] is for output,
	 * sg[1] is for input.
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* The caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret))
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");

	return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
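
/*
 * Map the generic rte_crypto algorithm enums onto the CAAM algorithm
 * selectors (OP_ALG_ALGSEL_*) and operating modes (OP_ALG_AAI_*) that
 * the RTA shared-descriptor constructors understand.
 */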
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}

caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}

caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}

/* prepare the command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->qp->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL, ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported aead alg\n");
			return -ENOTSUP;
		}

		alginfo.key = (uint64_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo, ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo, ses->iv.length,
					ses->digest_length);
	} else {
		/* chained cipher + auth */
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
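
		/*
		 * Ask RTA whether both keys fit inline in the shared
		 * descriptor: the key lengths are passed in sh_desc[0..1]
		 * and the verdict comes back as bit flags in sh_desc[2]
		 * (bit 0 for the cipher key, bit 1 for the auth key).
		 * A key that does not fit is referenced by its physical
		 * address (RTA_DATA_PTR) instead of being embedded.
		 */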
		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}

		if (cdb->sh_desc[2] & 1) {
			alginfo_c.key_type = RTA_DATA_IMM;
		} else {
			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
						(void *)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1)) {
			alginfo_a.key_type = RTA_DATA_IMM;
		} else {
			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
						(void *)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* Auth_only_len is set to 0 here; it is overwritten
		 * per packet in the fd.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
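
/*
 * Volatile dequeue: instead of waiting for scheduled delivery, program
 * a VDQCR with the wanted frame count and poll the portal until the FQ
 * leaves the VDQCR state, counting the frames the DQRR callback
 * consumed.
 */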
static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
	unsigned int pkts = 0;
	int ret;
	struct qm_mcr_queryfq_np np;
	enum qman_fq_state state;
	uint32_t flags;
	uint32_t vdqcr;

	qman_query_fq_np(fq, &np);
	if (np.frm_cnt) {
		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
		if (exact)
			vdqcr |= QM_VDQCR_EXACT;
		ret = qman_volatile_dequeue(fq, 0, vdqcr);
		if (ret)
			return 0;
		do {
			pkts += qman_poll_dqrr(len);
			qman_fq_state(fq, &state, &flags);
		} while (flags & QMAN_FQ_STATE_VDQCR);
	}

	return pkts;
}

/* The qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;

	fq = &qp->outq;
	dpaa_sec_op_nb = 0;
	dpaa_sec_ops = ops;

	if (unlikely(nb_ops > DPAA_SEC_BURST))
		nb_ops = DPAA_SEC_BURST;

	return dpaa_volatile_deq(fq, nb_ops, 1);
}

/*
 * The input packet looks like:
 *	|<----data_len------->|
 *	|ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);
	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
		sg = &cf->sg[2];
		/* hash result or digest; save the digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);
		/* let the hw check the digest */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	return cf;
}
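
/*
 * Cipher-only compound frame: sg[0] describes the output buffer, while
 * sg[1] is an extension entry pointing at sg[2..], the real input
 * chain of the IV followed by the source payload.
 */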
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);
	/* input: need to extend the input to a compound frame */
	sg = &cf->sg[1];
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);
	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);
	return cf;
}
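
/*
 * AEAD (GCM) compound frame: the input chain is the IV, the optional
 * AAD, then the payload (plus the saved digest on decode); the output
 * chain is the payload shifted back by auth_only_len, plus the newly
 * computed digest on encode.
 */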
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);
	return cf;
}
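
/*
 * Chained cipher+auth (non-AEAD): the whole auth.data region is
 * hashed while only the cipher.data region is ciphered; on encode the
 * digest is appended to the output, on decode the expected digest is
 * copied into the ctx so the hardware can verify it.
 */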
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);
	return cf;
}
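
/*
 * Build a compound FD for the op and hand it to the caam via the qp's
 * input FQ. auth_only_len (bytes that are authenticated but not
 * ciphered) is passed per packet through fd.cmd, which the hardware
 * loads into its DPOVRD register.
 */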
static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct qm_fd fd;
	int ret;
	uint32_t auth_only_len = op->sym->auth.data.length -
				 op->sym->cipher.data.length;

	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
					cryptodev_driver_id);

	if (unlikely(!qp->ses || qp->ses != ses)) {
		qp->ses = ses;
		ses->qp = qp;
		ret = dpaa_sec_prep_cdb(ses);
		if (ret)
			return ret;
	}

	/*
	 * Segmented buffers are not supported.
	 */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}
	if (is_auth_only(ses)) {
		cf = build_auth_only(op, ses);
	} else if (is_cipher_only(ses)) {
		cf = build_cipher_only(op, ses);
	} else if (is_aead(ses)) {
		cf = build_cipher_auth_gcm(op, ses);
		auth_only_len = ses->auth_only_len;
	} else if (is_auth_cipher(ses)) {
		cf = build_cipher_auth(op, ses);
	} else {
		PMD_TX_LOG(ERR, "unsupported sec op");
		return -ENOTSUP;
	}
	if (unlikely(!cf))
		return -ENOMEM;

	memset(&fd, 0, sizeof(struct qm_fd));
	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
	fd._format1 = qm_fd_compound;
	fd.length29 = 2 * sizeof(struct qm_sg_entry);
	/* Auth_only_len is set as 0 in the descriptor and is overwritten
	 * here in fd.cmd, which updates the DPOVRD register.
	 */
	if (auth_only_len)
		fd.cmd = 0x80000000 | auth_only_len;

	ret = qman_enqueue(&qp->inq, &fd, 0);
	return ret;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and queue pair */
	uint32_t loop;
	int32_t ret;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;

	if (unlikely(nb_ops == 0))
		return 0;
	/* Prepare each packet which is to be sent */
	for (loop = 0; loop < nb_ops; loop++) {
		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
			return 0;
		}
		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
		if (!ret)
			num_tx++;
	}
	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;
	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;
	return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return dev->data->nb_queue_pairs;
}

/** Return the size of the session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;
	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;
	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;
	return 0;
}
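
/*
 * A queue pair serves one session at a time: attaching binds the
 * session to the qp and builds its shared descriptor (CDB) up front;
 * the enqueue path rebuilds it only when a different session shows up
 * on the same qp.
 */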
static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_qp *qp;

	PMD_INIT_FUNC_TRACE();

	qp = dev->data->queue_pairs[qp_id];
	if (qp->ses != NULL) {
		PMD_INIT_LOG(ERR, "qp in use by another session\n");
		return -EBUSY;
	}

	qp->ses = sess;
	sess->qp = qp;

	return dpaa_sec_prep_cdb(sess);
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_qp *qp;

	PMD_INIT_FUNC_TRACE();

	qp = dev->data->queue_pairs[qp_id];
	if (qp->ses != NULL) {
		qp->ses = NULL;
		sess->qp = NULL;
		return 0;
	}

	PMD_DRV_LOG(ERR, "No session attached to qp");
	return -EINVAL;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;

	return 0;
}

static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
			    "session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
				 sess_private_data);

	return 0;
}

/** Clear the memory of the session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		       struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa_sec_dev_configure,
	.dev_start	      = dpaa_sec_dev_start,
	.dev_stop	      = dpaa_sec_dev_stop,
	.dev_close	      = dpaa_sec_dev_close,
	.dev_infos_get	      = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.queue_pair_start     = dpaa_sec_queue_pair_start,
	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
	.queue_pair_count     = dpaa_sec_queue_pair_count,
	.session_get_size     = dpaa_sec_session_get_size,
	.session_configure    = dpaa_sec_session_configure,
	.session_clear	      = dpaa_sec_session_clear,
	.qp_attach_session    = dpaa_sec_qp_attach_sess,
	.qp_detach_session    = dpaa_sec_qp_detach_sess,
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	if (dev == NULL)
		return -ENODEV;

	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp;
	uint32_t i;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init the qman fqs for the queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
		ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
				       qman_fq_fqid(&qp->outq));
		if (ret) {
			PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
			goto init_error;
		}
	}

	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));
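
	/*
	 * The SEC era determines which descriptor commands RTA may
	 * emit. If it was not set explicitly, probe it from the device
	 * tree via the "fsl,sec-era" property of the caam node below.
	 */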
	/* if the sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke the PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);
	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);