/* hash result or digest, save digest first */
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
- memset(sym->auth.digest.data, 0, ses->digest_length);
qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
sg->length = sym->auth.data.length;
cpu_to_hw_sg(sg);
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
struct rte_crypto_sym_op *sym = op->sym;
- struct rte_mbuf *mbuf = sym->m_src;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
- phys_addr_t start_addr;
+ phys_addr_t src_start_addr, dst_start_addr;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ses->iv.offset);
ctx = dpaa_sec_alloc_ctx(ses);
if (!ctx)
	return NULL;

cf = &ctx->job;
ctx->op = op;
- start_addr = rte_pktmbuf_mtophys(mbuf);
+
+ src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+
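+ /* out of place operation: write to m_dst when supplied, else in place */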
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
/* output */
sg = &cf->sg[0];
- qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
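+ /* output length includes room for the IV in addition to the payload */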
sg->length = sym->cipher.data.length + ses->iv.length;
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
+ qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
sg->final = 1;
cpu_to_hw_sg(sg);
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
struct rte_crypto_sym_op *sym = op->sym;
- struct rte_mbuf *mbuf = sym->m_src;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
- phys_addr_t start_addr;
uint32_t length = 0;
+ phys_addr_t src_start_addr, dst_start_addr;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ses->iv.offset);
- start_addr = mbuf->buf_physaddr + mbuf->data_off;
+ src_start_addr = sym->m_src->buf_physaddr + sym->m_src->data_off;
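+ /* (buf_physaddr + data_off is what rte_pktmbuf_mtophys() returns) */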
+
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_physaddr + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
ctx = dpaa_sec_alloc_ctx(ses);
if (!ctx)
	return NULL;
cpu_to_hw_sg(sg);
sg++;
}
- qm_sg_entry_set64(sg, start_addr + sym->aead.data.offset);
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
sg->length = sym->aead.data.length;
length += sg->length;
sg->final = 1;
cpu_to_hw_sg(sg);
sg++;
}
- qm_sg_entry_set64(sg, start_addr + sym->aead.data.offset);
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
sg->length = sym->aead.data.length;
length += sg->length;
cpu_to_hw_sg(sg);
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
- memset(sym->aead.digest.data, 0, ses->digest_length);
sg++;
qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg++;
qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
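+ /* start auth_only_len bytes earlier so the output also covers the AAD */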
qm_sg_entry_set64(sg,
- start_addr + sym->aead.data.offset - ses->auth_only_len);
+ dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
sg->length = sym->aead.data.length + ses->auth_only_len;
length = sg->length;
if (is_encode(ses)) {
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
struct rte_crypto_sym_op *sym = op->sym;
- struct rte_mbuf *mbuf = sym->m_src;
struct dpaa_sec_job *cf;
struct dpaa_sec_op_ctx *ctx;
struct qm_sg_entry *sg;
- phys_addr_t start_addr;
+ phys_addr_t src_start_addr, dst_start_addr;
uint32_t length = 0;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ses->iv.offset);
- start_addr = mbuf->buf_physaddr + mbuf->data_off;
+ src_start_addr = sym->m_src->buf_physaddr + sym->m_src->data_off;
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_physaddr + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
ctx = dpaa_sec_alloc_ctx(ses);
if (!ctx)
	return NULL;
cpu_to_hw_sg(sg);
sg++;
- qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
sg->length = sym->auth.data.length;
length += sg->length;
sg->final = 1;
sg++;
- qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
sg->length = sym->auth.data.length;
length += sg->length;
cpu_to_hw_sg(sg);
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
- memset(sym->auth.digest.data, 0, ses->digest_length);
sg++;
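+ /* point the SEC input at the digest copy saved in the op context */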
qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
/* output */
sg++;
qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
- qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
if (is_encode(ses)) {
return ret;
}
+ /*
+  * Segmented buffers are not supported: each build routine assumes a
+  * single contiguous mbuf.
+  */
+ if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -ENOTSUP;
+ }
if (is_auth_only(ses)) {
cf = build_auth_only(op, ses);
} else if (is_cipher_only(ses)) {
if (ret)
return ret;
- /* free crypto device */
- rte_cryptodev_pmd_release_device(cryptodev);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(cryptodev->data->dev_private);
-
- PMD_INIT_LOG(INFO, "Closing dpaa crypto device %s",
- cryptodev->data->name);
-
- cryptodev->device = NULL;
- cryptodev->data = NULL;
-
- return 0;
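+ /*
+  * rte_cryptodev_pmd_destroy() releases the device, frees dev_private
+  * in the primary process and clears the data pointers.
+  */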
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {