X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fdpaa_sec%2Fdpaa_sec.c;h=6b57ce36542d3d80b5637092026621ba6e50cc61;hb=fcf6702909f0d36e2086be9fc37399e597d0fa8a;hp=b54803c845d29423f3d926f0d168583067192fd2;hpb=74658bda33819742ef6e1af41afa53ae535a8c24;p=dpdk.git
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index b54803c845..6b57ce3654 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of NXP nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
 #include
@@ -106,11 +80,13 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
 
 	ctx->ctx_pool = ses->ctx_pool;
+	ctx->vtop_offset = (uint64_t) ctx
+				- rte_mempool_virt2iova(ctx);
 
 	return ctx;
 }
 
-static inline phys_addr_t
+static inline rte_iova_t
 dpaa_mem_vtop(void *vaddr)
 {
 	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
@@ -121,26 +97,33 @@ dpaa_mem_vtop(void *vaddr)
 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
 		if (vaddr_64 >= memseg[i].addr_64 &&
 		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
-			paddr = memseg[i].phys_addr +
+			paddr = memseg[i].iova +
 					(vaddr_64 - memseg[i].addr_64);
-			return (phys_addr_t)paddr;
+			return (rte_iova_t)paddr;
 		}
 	}
-	return (phys_addr_t)(NULL);
+	return (rte_iova_t)(NULL);
+}
+
+/* virtual address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
+{
+	return (uint64_t)vaddr - ctx->vtop_offset;
 }
 
 static inline void *
-dpaa_mem_ptov(phys_addr_t paddr)
+dpaa_mem_ptov(rte_iova_t paddr)
 {
 	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
 	int i;
 
 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-		if (paddr >= memseg[i].phys_addr &&
-		    (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+		if (paddr >= memseg[i].iova &&
+		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
 			return (void *)(memseg[i].addr_64 +
-					(paddr - memseg[i].phys_addr));
+					(paddr - memseg[i].iova));
 	}
 
 	return NULL;
 }
@@ -158,7 +141,7 @@ ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
  * all the packets in this queue could be dispatched into caam
  */
 static int
-dpaa_sec_init_rx(struct qman_fq *fq_in, phys_addr_t hwdesc,
+dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
 		 uint32_t fqid_out)
 {
 	struct qm_mcc_initfq fq_opts;
@@ -566,7 +549,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	struct dpaa_sec_job *cf;
 	struct dpaa_sec_op_ctx *ctx;
 	struct qm_sg_entry *sg;
-	phys_addr_t start_addr;
+	rte_iova_t start_addr;
 	uint8_t *old_digest;
 
 	ctx = dpaa_sec_alloc_ctx(ses);
@@ -577,7 +560,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	ctx->op = op;
 	old_digest = ctx->digest;
 
-	start_addr = rte_pktmbuf_mtophys(mbuf);
+	start_addr = rte_pktmbuf_iova(mbuf);
 	/* output */
 	sg = &cf->sg[0];
 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
@@ -589,7 +572,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	if (is_decode(ses)) {
 		/* need to extend the input to a compound frame */
 		sg->extension = 1;
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
 		sg->length = sym->auth.data.length + ses->digest_length;
 		sg->final = 1;
 		cpu_to_hw_sg(sg);
@@ -603,7 +586,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		cpu_to_hw_sg(sg);
 
 		/* let's check digest by hw */
-		start_addr = dpaa_mem_vtop(old_digest);
+		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
 		sg++;
 		qm_sg_entry_set64(sg, start_addr);
 		sg->length = ses->digest_length;
@@ -623,11 +606,10 @@ static inline struct dpaa_sec_job *
 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
 	struct rte_crypto_sym_op *sym = op->sym;
-	struct rte_mbuf *mbuf = sym->m_src;
 	struct dpaa_sec_job *cf;
 	struct dpaa_sec_op_ctx *ctx;
 	struct qm_sg_entry *sg;
-	phys_addr_t start_addr;
+	rte_iova_t src_start_addr, dst_start_addr;
 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 			ses->iv.offset);
 
@@ -637,11 +619,17 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	cf = &ctx->job;
 	ctx->op = op;
 
-	start_addr = rte_pktmbuf_mtophys(mbuf);
+
+	src_start_addr = rte_pktmbuf_iova(sym->m_src);
+
+	if (sym->m_dst)
+		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+	else
+		dst_start_addr = src_start_addr;
 
 	/* output */
 	sg = &cf->sg[0];
-	qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
+	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
 	sg->length = sym->cipher.data.length + ses->iv.length;
 	cpu_to_hw_sg(sg);
 
@@ -652,7 +640,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	sg->extension = 1;
 	sg->final = 1;
 	sg->length = sym->cipher.data.length + ses->iv.length;
-	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
 	cpu_to_hw_sg(sg);
 
 	sg = &cf->sg[2];
@@ -661,7 +649,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	cpu_to_hw_sg(sg);
 
 	sg++;
-	qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
+	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
 	sg->length = sym->cipher.data.length;
 	sg->final = 1;
 	cpu_to_hw_sg(sg);
@@ -673,16 +661,20 @@ static inline struct dpaa_sec_job *
 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
 	struct rte_crypto_sym_op *sym = op->sym;
-	struct rte_mbuf *mbuf = sym->m_src;
 	struct dpaa_sec_job *cf;
 	struct dpaa_sec_op_ctx *ctx;
 	struct qm_sg_entry *sg;
-	phys_addr_t start_addr;
 	uint32_t length = 0;
+	rte_iova_t src_start_addr, dst_start_addr;
 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 			ses->iv.offset);
 
-	start_addr = mbuf->buf_physaddr + mbuf->data_off;
+	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+
+	if (sym->m_dst)
+		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+	else
+		dst_start_addr = src_start_addr;
 
 	ctx = dpaa_sec_alloc_ctx(ses);
 	if (!ctx)
@@ -694,7 +686,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	/* input */
 	rte_prefetch0(cf->sg);
 	sg = &cf->sg[2];
-	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
 	if (is_encode(ses)) {
 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
 		sg->length = ses->iv.length;
@@ -710,7 +702,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 			cpu_to_hw_sg(sg);
 			sg++;
 		}
-		qm_sg_entry_set64(sg, start_addr + sym->aead.data.offset);
+		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
 		sg->length = sym->aead.data.length;
 		length += sg->length;
 		sg->final = 1;
@@ -730,7 +722,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 			cpu_to_hw_sg(sg);
 			sg++;
 		}
-		qm_sg_entry_set64(sg, start_addr + sym->aead.data.offset);
+		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
 		sg->length = sym->aead.data.length;
 		length += sg->length;
 		cpu_to_hw_sg(sg);
@@ -739,7 +731,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		       ses->digest_length);
 
 		sg++;
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
 		sg->length = ses->digest_length;
 		length += sg->length;
 		sg->final = 1;
@@ -753,9 +745,9 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
 	/* output */
 	sg++;
-	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
 	qm_sg_entry_set64(sg,
-		start_addr + sym->aead.data.offset - ses->auth_only_len);
+		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
ses->auth_only_len); sg->length = sym->aead.data.length + ses->auth_only_len; length = sg->length; if (is_encode(ses)) { @@ -781,16 +773,19 @@ static inline struct dpaa_sec_job * build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) { struct rte_crypto_sym_op *sym = op->sym; - struct rte_mbuf *mbuf = sym->m_src; struct dpaa_sec_job *cf; struct dpaa_sec_op_ctx *ctx; struct qm_sg_entry *sg; - phys_addr_t start_addr; + rte_iova_t src_start_addr, dst_start_addr; uint32_t length = 0; uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, ses->iv.offset); - start_addr = mbuf->buf_physaddr + mbuf->data_off; + src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off; + if (sym->m_dst) + dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off; + else + dst_start_addr = src_start_addr; ctx = dpaa_sec_alloc_ctx(ses); if (!ctx) @@ -802,7 +797,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input */ rte_prefetch0(cf->sg); sg = &cf->sg[2]; - qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg)); + qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg)); if (is_encode(ses)) { qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr)); sg->length = ses->iv.length; @@ -810,7 +805,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) cpu_to_hw_sg(sg); sg++; - qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset); + qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset); sg->length = sym->auth.data.length; length += sg->length; sg->final = 1; @@ -823,7 +818,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) sg++; - qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset); + qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset); sg->length = sym->auth.data.length; length += sg->length; cpu_to_hw_sg(sg); @@ -832,7 +827,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) ses->digest_length); sg++; - qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); sg->length = ses->digest_length; length += sg->length; sg->final = 1; @@ -846,8 +841,8 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output */ sg++; - qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg)); - qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset); + qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset); sg->length = sym->cipher.data.length; length = sg->length; if (is_encode(ses)) { @@ -890,6 +885,13 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp) return ret; } + /* + * Segmented buffer is not supported. + */ + if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return -ENOTSUP; + } if (is_auth_only(ses)) { cf = build_auth_only(op, ses); } else if (is_cipher_only(ses)) { @@ -1510,19 +1512,7 @@ cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev) if (ret) return ret; - /* free crypto device */ - rte_cryptodev_pmd_release_device(cryptodev); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(cryptodev->data->dev_private); - - PMD_INIT_LOG(INFO, "Closing dpaa crypto device %s", - cryptodev->data->name); - - cryptodev->device = NULL; - cryptodev->data = NULL; - - return 0; + return rte_cryptodev_pmd_destroy(cryptodev); } static struct rte_dpaa_driver rte_dpaa_sec_driver = {