mbuf: rename data address helpers to IOVA
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 7b9a683..1d9d03a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -577,7 +577,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        ctx->op = op;
        old_digest = ctx->digest;
 
-       start_addr = rte_pktmbuf_mtophys(mbuf);
+       start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
@@ -598,7 +598,6 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
                /* hash result or digest, save digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                           ses->digest_length);
-               memset(sym->auth.digest.data, 0, ses->digest_length);
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                cpu_to_hw_sg(sg);
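
The two hunks above switch build_auth_only() to the IOVA-named data address helper and drop the zeroing of the caller's digest buffer; the original digest has already been saved into old_digest just above, so the driver no longer needs to clobber the application's copy. A minimal sketch of what the renamed helper resolves to, assuming the DPDK 17.11-era mbuf API (start_of_data() is an illustrative name, not part of the patch):

#include <rte_mbuf.h>

/* Illustrative only: rte_pktmbuf_iova(m) is the new spelling of the
 * deprecated rte_pktmbuf_mtophys(m); both yield the IO/bus address of
 * the first byte of packet data, i.e. buf_iova + data_off. */
static inline rte_iova_t
start_of_data(const struct rte_mbuf *m)
{
        return rte_pktmbuf_iova(m);
}
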
@@ -624,11 +623,10 @@ static inline struct dpaa_sec_job *
 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
        struct rte_crypto_sym_op *sym = op->sym;
-       struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
-       phys_addr_t start_addr;
+       phys_addr_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
 
@@ -638,11 +636,17 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
        cf = &ctx->job;
        ctx->op = op;
-       start_addr = rte_pktmbuf_mtophys(mbuf);
+
+       src_start_addr = rte_pktmbuf_iova(sym->m_src);
+
+       if (sym->m_dst)
+               dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+       else
+               dst_start_addr = src_start_addr;
 
        /* output */
        sg = &cf->sg[0];
-       qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
+       qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length + ses->iv.length;
        cpu_to_hw_sg(sg);
 
@@ -662,7 +666,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        cpu_to_hw_sg(sg);
 
        sg++;
-       qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
+       qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        sg->final = 1;
        cpu_to_hw_sg(sg);
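
build_cipher_only() now supports out-of-place operation: the input scatter-gather entry reads from m_src, while the output entry writes to m_dst when the application supplies one and falls back to in-place processing otherwise. A hedged sketch of that selection using the public sym-op fields (pick_io_addrs() is a hypothetical helper, not part of the driver):

#include <rte_crypto_sym.h>
#include <rte_mbuf.h>

/* Read from m_src; write to m_dst if the application set one,
 * otherwise operate in place on the source mbuf. */
static inline void
pick_io_addrs(const struct rte_crypto_sym_op *sym,
              rte_iova_t *src, rte_iova_t *dst)
{
        *src = rte_pktmbuf_iova(sym->m_src);
        *dst = sym->m_dst ? rte_pktmbuf_iova(sym->m_dst) : *src;
}
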
@@ -674,16 +678,20 @@ static inline struct dpaa_sec_job *
 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
        struct rte_crypto_sym_op *sym = op->sym;
-       struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
-       phys_addr_t start_addr;
        uint32_t length = 0;
+       phys_addr_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
 
-       start_addr = mbuf->buf_physaddr + mbuf->data_off;
+       src_start_addr = sym->m_src->buf_physaddr + sym->m_src->data_off;
+
+       if (sym->m_dst)
+               dst_start_addr = sym->m_dst->buf_physaddr + sym->m_dst->data_off;
+       else
+               dst_start_addr = src_start_addr;
 
        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
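
The GCM path gains the same out-of-place handling, here computed directly from buf_physaddr + data_off rather than through the helper (buf_physaddr being the older name that buf_iova replaces). From the application's point of view this simply means setting m_dst on the operation; a hedged sketch of such a setup, assuming the op, session and mbufs are already allocated, with AAD, digest and IV setup omitted for brevity (prepare_oop_aead_op() is illustrative only):

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>

/* Hypothetical application-side setup of an out-of-place AEAD operation:
 * plaintext is read from "src", ciphertext is written to "dst". */
static void
prepare_oop_aead_op(struct rte_crypto_op *op,
                    struct rte_cryptodev_sym_session *sess,
                    struct rte_mbuf *src, struct rte_mbuf *dst,
                    uint32_t len)
{
        rte_crypto_op_attach_sym_session(op, sess);
        op->sym->m_src = src;
        op->sym->m_dst = dst;   /* leaving this NULL means in-place */
        op->sym->aead.data.offset = 0;
        op->sym->aead.data.length = len;
}
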
@@ -711,7 +719,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
                        cpu_to_hw_sg(sg);
                        sg++;
                }
-               qm_sg_entry_set64(sg, start_addr + sym->aead.data.offset);
+               qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
@@ -731,14 +739,13 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
                        cpu_to_hw_sg(sg);
                        sg++;
                }
-               qm_sg_entry_set64(sg, start_addr + sym->aead.data.offset);
+               qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);
 
                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
-               memset(sym->aead.digest.data, 0, ses->digest_length);
                sg++;
 
                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
@@ -757,7 +764,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg,
-               start_addr + sym->aead.data.offset - ses->auth_only_len);
+               dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
        sg->length = sym->aead.data.length + ses->auth_only_len;
        length = sg->length;
        if (is_encode(ses)) {
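
The output entry above is rewound by ses->auth_only_len and widened by the same amount, so the auth-only bytes immediately preceding the cipher data in the destination buffer fall inside the single output span. A small worked example with illustrative numbers (the values are assumptions, not taken from the driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t dst_start_addr = 0x1000;  /* assumed IOVA of the dst data */
        uint32_t aead_offset = 32, aead_length = 64, auth_only_len = 16;

        /* same arithmetic as the qm_sg_entry_set64() call above */
        uint64_t out_addr = dst_start_addr + aead_offset - auth_only_len;
        uint32_t out_len = aead_length + auth_only_len;

        printf("output entry: addr=0x%" PRIx64 " len=%u\n", out_addr, out_len);
        return 0;   /* prints addr=0x1010 len=80 */
}
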
@@ -783,16 +790,19 @@ static inline struct dpaa_sec_job *
 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
        struct rte_crypto_sym_op *sym = op->sym;
-       struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
-       phys_addr_t start_addr;
+       phys_addr_t src_start_addr, dst_start_addr;
        uint32_t length = 0;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
 
-       start_addr = mbuf->buf_physaddr + mbuf->data_off;
+       src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+       if (sym->m_dst)
+               dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+       else
+               dst_start_addr = src_start_addr;
 
        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
@@ -812,7 +822,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
                cpu_to_hw_sg(sg);
 
                sg++;
-               qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+               qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                sg->final = 1;
@@ -825,14 +835,13 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
                sg++;
 
-               qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+               qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);
 
                memcpy(ctx->digest, sym->auth.digest.data,
                       ses->digest_length);
-               memset(sym->auth.digest.data, 0, ses->digest_length);
                sg++;
 
                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
@@ -850,7 +859,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
-       qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
+       qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        length = sg->length;
        if (is_encode(ses)) {
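
The cipher-and-auth chain uses the same src/dst split, with every scatter-gather address expressed as a start address plus a crypto data offset. A sketch, assuming the rte_pktmbuf_iova_offset() helper introduced by the same rename, of an equivalent formulation (cipher_out_addr() is an illustrative name, not driver code):

#include <rte_crypto_sym.h>
#include <rte_mbuf.h>

/* Same address as "dst_start_addr + sym->cipher.data.offset" above:
 * rte_pktmbuf_iova_offset(m, o) evaluates to buf_iova + data_off + o. */
static inline rte_iova_t
cipher_out_addr(const struct rte_crypto_sym_op *sym)
{
        const struct rte_mbuf *m = sym->m_dst ? sym->m_dst : sym->m_src;

        return rte_pktmbuf_iova_offset(m, sym->cipher.data.offset);
}
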
@@ -893,6 +902,13 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
                        return ret;
        }
 
+       /*
+        * Segmented buffer is not supported.
+        */
+       if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+               return -ENOTSUP;
+       }
        if (is_auth_only(ses)) {
                cf = build_auth_only(op, ses);
        } else if (is_cipher_only(ses)) {
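
The new check rejects chained (multi-segment) mbufs before any scatter-gather entries are built, since the address math in the build functions above assumes one contiguous data buffer; rte_pktmbuf_is_contiguous() is true when the mbuf has a single segment. A hedged sketch of how a caller could react to the -ENOTSUP result, assuming it owns the mbuf and the first segment has enough tailroom (ensure_contiguous() is a hypothetical helper):

#include <rte_mbuf.h>

/* Coalesce a chained mbuf before handing it to a PMD that rejects
 * segmented buffers. */
static int
ensure_contiguous(struct rte_mbuf *m)
{
        if (rte_pktmbuf_is_contiguous(m))
                return 0;

        /* rte_pktmbuf_linearize() copies the trailing segments into the
         * first one and frees them; it fails if tailroom is too small. */
        return rte_pktmbuf_linearize(m);
}
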
@@ -1513,19 +1529,7 @@ cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
        if (ret)
                return ret;
 
-       /* free crypto device */
-       rte_cryptodev_pmd_release_device(cryptodev);
-
-       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-               rte_free(cryptodev->data->dev_private);
-
-       PMD_INIT_LOG(INFO, "Closing dpaa crypto device %s",
-                    cryptodev->data->name);
-
-       cryptodev->device = NULL;
-       cryptodev->data = NULL;
-
-       return 0;
+       return rte_cryptodev_pmd_destroy(cryptodev);
 }
 
 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
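
The remove path now delegates teardown to rte_cryptodev_pmd_destroy(), which wraps the sequence the removed lines open-coded (releasing the cryptodev slot and, in the primary process, freeing dev_private). A hedged sketch of a remove callback built on the same helper (my_pmd_remove() and the name-based lookup are illustrative, not the dpaa code):

#include <errno.h>

#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>

static int
my_pmd_remove(const char *name)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_named_dev(name);

        if (dev == NULL)
                return -ENODEV;

        /* single call replacing the manual release/free/log sequence */
        return rte_cryptodev_pmd_destroy(dev);
}
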