crypto/dpaa_sec: optimize virtual address conversion
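dpaa_mem_vtop() walks the memseg table for every virtual-to-IOVA conversion. The per-operation context (ctx) comes from a mempool, so its IOVA is already known at allocation time: cache the virt-to-IOVA delta in ctx->vtop_offset and convert any address that lives inside the ctx with a single subtraction through the new dpaa_mem_vtop_ctx() helper. Addresses outside the ctx keep using dpaa_mem_vtop(). Short sketches of the idea follow the dpaa_sec_alloc_ctx() and dpaa_mem_ptov() hunks below.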
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 16155b1..6b57ce3 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1,34 +1,8 @@
-/*-
- *   BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2017 NXP.
+ *   Copyright 2017 NXP
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of NXP nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <fcntl.h>
@@ -106,6 +80,8 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
        dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
 
        ctx->ctx_pool = ses->ctx_pool;
+       ctx->vtop_offset = (uint64_t) ctx
+                               - rte_mempool_virt2iova(ctx);
 
        return ctx;
 }
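The cached offset is valid for any address inside the same ctx because a mempool element is IOVA-contiguous, so the virt-to-IOVA delta is one constant per allocation. A minimal sketch of the arithmetic, assuming an IOVA-contiguous element (the names below are illustrative, not taken from dpaa_sec.c):

#include <stdint.h>
#include <rte_memory.h>

/*
 * O(1) virtual-to-IOVA conversion for an address inside a mempool element,
 * given the element's base virtual address and base IOVA.  Equivalent to
 * iova_base + (vaddr - virt_base); the subtraction form lets the delta be
 * computed once at allocation time, as ctx->vtop_offset is above.
 */
static inline rte_iova_t
elem_vtop(const void *virt_base, rte_iova_t iova_base, const void *vaddr)
{
	uint64_t vtop_offset = (uint64_t)virt_base - (uint64_t)iova_base;

	return (rte_iova_t)((uint64_t)vaddr - vtop_offset);
}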
@@ -121,7 +97,7 @@ dpaa_mem_vtop(void *vaddr)
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
                if (vaddr_64 >= memseg[i].addr_64 &&
                    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
-                       paddr = memseg[i].phys_addr +
+                       paddr = memseg[i].iova +
                                (vaddr_64 - memseg[i].addr_64);
 
                        return (rte_iova_t)paddr;
@@ -130,6 +106,13 @@ dpaa_mem_vtop(void *vaddr)
        return (rte_iova_t)(NULL);
 }
 
+/* virtual address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
+{
+       return (uint64_t)vaddr - ctx->vtop_offset;
+}
+
 static inline void *
 dpaa_mem_ptov(rte_iova_t paddr)
 {
@@ -137,10 +120,10 @@ dpaa_mem_ptov(rte_iova_t paddr)
        int i;
 
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-               if (paddr >= memseg[i].phys_addr &&
-                   (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+               if (paddr >= memseg[i].iova &&
+                   (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
                        return (void *)(memseg[i].addr_64 +
-                                       (paddr - memseg[i].phys_addr));
+                                       (paddr - memseg[i].iova));
        }
        return NULL;
 }
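With the helper in place, the build_* hunks below switch every conversion whose target lives inside the ctx (the SG table entries, ctx->digest) to the O(1) path, and keep dpaa_mem_vtop() only for addresses outside the ctx, such as the per-session IV. A usage sketch of the pattern, taken from the GCM hunk further down:

	/* &cf->sg[2] lives inside the ctx: constant-time conversion */
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));

	/* IV_ptr points into the session, outside the ctx: memseg lookup */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));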
@@ -589,7 +572,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        if (is_decode(ses)) {
                /* need to extend the input to a compound frame */
                sg->extension = 1;
-               qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
                sg->length = sym->auth.data.length + ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
@@ -603,7 +586,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
                cpu_to_hw_sg(sg);
 
                /* let's check digest by hw */
-               start_addr = dpaa_mem_vtop(old_digest);
+               start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
@@ -657,7 +640,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        sg->extension = 1;
        sg->final = 1;
        sg->length = sym->cipher.data.length + ses->iv.length;
-       qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+       qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
        cpu_to_hw_sg(sg);
 
        sg = &cf->sg[2];
@@ -703,7 +686,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
-       qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+       qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
@@ -748,7 +731,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
                       ses->digest_length);
                sg++;
 
-               qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
@@ -762,7 +745,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
        /* output */
        sg++;
-       qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+       qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
        sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -814,7 +797,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
-       qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+       qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
@@ -844,7 +827,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
                       ses->digest_length);
                sg++;
 
-               qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
@@ -858,7 +841,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
        /* output */
        sg++;
-       qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+       qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        length = sg->length;