/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP
+ * Copyright 2017-2018 NXP
*
*/
enum rta_sec_era rta_sec_era;
+int dpaa_logtype_sec;
+
static uint8_t cryptodev_driver_id;
static __thread struct rte_crypto_op **dpaa_sec_ops;
if (!ctx->fd_status) {
ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
- PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
+ DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
if (!ctx || retval) {
- PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
+ DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
return NULL;
}
/*
dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
ctx->ctx_pool = ses->ctx_pool;
- ctx->vtop_offset = (uint64_t) ctx
+ ctx->vtop_offset = (size_t) ctx
- rte_mempool_virt2iova(ctx);
return ctx;
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- uint64_t vaddr_64, paddr;
- int i;
-
- vaddr_64 = (uint64_t)vaddr;
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (vaddr_64 >= memseg[i].addr_64 &&
- vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
- paddr = memseg[i].iova +
- (vaddr_64 - memseg[i].addr_64);
-
- return (rte_iova_t)paddr;
- }
- }
- return (rte_iova_t)(NULL);
-}
+ const struct rte_memseg *ms;
-/* virtual address conversin when mempool support is available for ctx */
-static inline phys_addr_t
-dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
-{
- return (uint64_t)vaddr - ctx->vtop_offset;
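+	/* Use the EAL memseg lookup; walking the legacy physmem layout
+	 * no longer applies with the dynamic memory model.
+	 */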
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+ return (size_t)NULL;
}
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- int i;
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].iova &&
- (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
- return (void *)(memseg[i].addr_64 +
- (paddr - memseg[i].iova));
- }
- return NULL;
+ return rte_mem_iova2virt(paddr);
}
static void
struct qman_fq *fq,
const struct qm_mr_entry *msg)
{
- RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
- fq->fqid, msg->ern.rc, msg->ern.seqnum);
+ DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
+ fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* initialize the queue with dest chan as caam chan so that
fq_in->cb.ern = ern_sec_fq_handler;
- PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+ DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
ret = qman_init_fq(fq_in, flags, &fq_opts);
if (unlikely(ret != 0))
- PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
+ DPAA_SEC_ERR("qman_init_fq failed %d", ret);
return ret;
}
ret = qman_create_fq(0, flags, fq);
if (unlikely(ret)) {
- PMD_INIT_LOG(ERR, "qman_create_fq failed");
+ DPAA_SEC_ERR("qman_create_fq failed");
return ret;
}
ret = qman_init_fq(fq, 0, &opts);
if (unlikely(ret)) {
- PMD_INIT_LOG(ERR, "unable to init caam source fq!");
+ DPAA_SEC_ERR("unable to init caam source fq!");
return ret;
}
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
+ DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
}
}
alginfo_c->algmode = OP_ALG_AAI_CTR;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
+ DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
}
}
alginfo->algmode = OP_ALG_AAI_GCM;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
+ DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
}
}
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
- uint32_t shared_desc_len = 0;
+ int32_t shared_desc_len = 0;
struct sec_cdb *cdb = &ses->cdb;
int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ DPAA_SEC_ERR("not supported cipher alg");
return -ENOTSUP;
}
- alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
} else if (is_auth_only(ses)) {
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported auth alg\n");
+ DPAA_SEC_ERR("not supported auth alg");
return -ENOTSUP;
}
- alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
} else if (is_aead(ses)) {
caam_aead_alg(ses, &alginfo);
if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported aead alg\n");
+ DPAA_SEC_ERR("not supported aead alg");
return -ENOTSUP;
}
- alginfo.key = (uint64_t)ses->aead_key.data;
+ alginfo.key = (size_t)ses->aead_key.data;
alginfo.keylen = ses->aead_key.length;
alginfo.key_enc_flags = 0;
alginfo.key_type = RTA_DATA_IMM;
} else {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ DPAA_SEC_ERR("not supported cipher alg");
return -ENOTSUP;
}
- alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported auth alg\n");
+ DPAA_SEC_ERR("not supported auth alg");
return -ENOTSUP;
}
- alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
&cdb->sh_desc[2], 2);
if (err < 0) {
- PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
return err;
}
if (cdb->sh_desc[2] & 1)
alginfo_c.key_type = RTA_DATA_IMM;
else {
- alginfo_c.key = (uint64_t)dpaa_mem_vtop(
- (void *)alginfo_c.key);
+ alginfo_c.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_c.key);
alginfo_c.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
alginfo_a.key_type = RTA_DATA_IMM;
else {
- alginfo_a.key = (uint64_t)dpaa_mem_vtop(
- (void *)alginfo_a.key);
+ alginfo_a.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_a.key);
alginfo_a.key_type = RTA_DATA_PTR;
}
cdb->sh_desc[0] = 0;
ses->digest_length, ses->dir);
}
}
+
+ if (shared_desc_len < 0) {
+ DPAA_SEC_ERR("error in preparing command block");
+ return shared_desc_len;
+ }
+
cdb->sh_hdr.hi.field.idlen = shared_desc_len;
cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
return 0;
}
-static inline unsigned int
-dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
+/* qp is lockless; it should be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
+ struct qman_fq *fq;
unsigned int pkts = 0;
int ret;
- struct qm_mcr_queryfq_np np;
- enum qman_fq_state state;
- uint32_t flags;
- uint32_t vdqcr;
-
- qman_query_fq_np(fq, &np);
- if (np.frm_cnt) {
- vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
- if (exact)
- vdqcr |= QM_VDQCR_EXACT;
- ret = qman_volatile_dequeue(fq, 0, vdqcr);
- if (ret)
- return 0;
- do {
- pkts += qman_poll_dqrr(len);
- qman_fq_state(fq, &state, &flags);
- } while (flags & QMAN_FQ_STATE_VDQCR);
- }
+ struct qm_dqrr_entry *dq;
+
+ fq = &qp->outq;
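+	/* Request a volatile dequeue of up to nb_ops frames; a single
+	 * VDQCR command is capped at DPAA_MAX_DEQUEUE_NUM_FRAMES.
+	 */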
+ ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+ DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+ if (ret)
+ return 0;
+
+ do {
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+ struct rte_crypto_op *op;
+
+ dq = qman_dequeue(fq);
+ if (!dq)
+ continue;
+
+ fd = &dq->fd;
+		/* sg is embedded in an op ctx;
+		 * sg[0] is for output,
+		 * sg[1] is for input
+		 */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ op = ctx->op;
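+		/* Protocol offload (IPsec) can change the frame length
+		 * during encap/decap, so sync the mbuf length fields
+		 * from the output SG entry.
+		 */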
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ op->sym->m_src->pkt_len = len;
+ op->sym->m_src->data_len = len;
+ }
+ if (!ctx->fd_status) {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ ops[pkts++] = op;
+
+		/* report op status to sym->op and then free the ctx memory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+ qman_dqrr_consume(fq, dq);
+ } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
return pkts;
}
-/* qp is lockless, should be accessed by only one thread */
-static int
-dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
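+/* Build an auth-only job over a (possibly scattered) mbuf chain:
+ * sg[0] carries the digest output, sg[1] is an extension entry whose
+ * input chain starts at sg[2].
+ */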
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
- struct qman_fq *fq;
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ phys_addr_t start_addr;
+ uint8_t *old_digest, extra_segs;
- fq = &qp->outq;
- dpaa_sec_op_nb = 0;
- dpaa_sec_ops = ops;
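+	/* Beyond the data segments, reserve the two compound-frame
+	 * entries; decode needs one more SG entry to carry the digest
+	 * being verified.
+	 */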
+ if (is_decode(ses))
+ extra_segs = 3;
+ else
+ extra_segs = 2;
+
+ if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+ old_digest = ctx->digest;
+
+ /* output */
+ out_sg = &cf->sg[0];
+ qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
+ out_sg->length = ses->digest_length;
+ cpu_to_hw_sg(out_sg);
- if (unlikely(nb_ops > DPAA_SEC_BURST))
- nb_ops = DPAA_SEC_BURST;
+ /* input */
+ in_sg = &cf->sg[1];
+ /* need to extend the input to a compound frame */
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_sg->length = sym->auth.data.length;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+
+ /* 1st seg */
+ sg = in_sg + 1;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ /* Digest verification case */
+ cpu_to_hw_sg(sg);
+ sg++;
+ rte_memcpy(old_digest, sym->auth.digest.data,
+ ses->digest_length);
+ start_addr = dpaa_mem_vtop(old_digest);
+ qm_sg_entry_set64(sg, start_addr);
+ sg->length = ses->digest_length;
+ in_sg->length += ses->digest_length;
+ } else {
+ /* Digest calculation case */
+ sg->length -= ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ cpu_to_hw_sg(in_sg);
- return dpaa_volatile_deq(fq, nb_ops, 1);
+ return cf;
}
/**
if (is_decode(ses)) {
/* need to extend the input to a compound frame */
sg->extension = 1;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
sg->length = sym->auth.data.length + ses->digest_length;
sg->final = 1;
cpu_to_hw_sg(sg);
cpu_to_hw_sg(sg);
/* let's check digest by hw */
- start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+ start_addr = dpaa_mem_vtop(old_digest);
sg++;
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
return cf;
}
+static inline struct dpaa_sec_job *
+build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 3;
+ }
+
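+	/* Two compound-frame entries plus the IV entry, then one entry
+	 * per data segment on each of the output and input chains.
+	 */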
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ out_sg->length = sym->cipher.data.length;
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->cipher.data.offset;
+ sg->offset = sym->cipher.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_sg->length = sym->cipher.data.length + ses->iv.length;
+
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 1st seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->cipher.data.offset;
+ sg->offset = sym->cipher.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
sg->extension = 1;
sg->final = 1;
sg->length = sym->cipher.data.length + ses->iv.length;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(sg);
sg = &cf->sg[2];
return cf;
}
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 4;
+ }
+
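+	/* Two compound-frame entries, the IV and the digest, plus one
+	 * entry per data segment; AAD, when present, takes one more.
+	 */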
+ if (ses->auth_only_len)
+ req_segs++;
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ rte_prefetch0(cf->sg);
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ if (is_encode(ses))
+ out_sg->length = sym->aead.data.length + ses->auth_only_len
+ + ses->digest_length;
+ else
+ out_sg->length = sym->aead.data.length + ses->auth_only_len;
+
+ /* output sg entries */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->aead.data.offset +
+ ses->auth_only_len;
+ sg->offset = sym->aead.data.offset - ses->auth_only_len;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->length -= ses->digest_length;
+
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ if (is_encode(ses))
+ in_sg->length = ses->iv.length + sym->aead.data.length
+ + ses->auth_only_len;
+ else
+ in_sg->length = ses->iv.length + sym->aead.data.length
+ + ses->auth_only_len + ses->digest_length;
+
+ /* input sg entries */
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* 1st seg IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 2nd seg auth only */
+ if (ses->auth_only_len) {
+ sg++;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ cpu_to_hw_sg(sg);
+ }
+
+ /* 3rd seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->aead.data.offset;
+ sg->offset = sym->aead.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ memcpy(ctx->digest, sym->aead.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
sg->length = sym->aead.data.length + ses->auth_only_len;
return cf;
}
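+/* Build a chained cipher+auth job over scattered mbufs; the layout
+ * mirrors build_cipher_auth_gcm_sg() minus the AAD entry.
+ */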
+static inline struct dpaa_sec_job *
+build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 4;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ rte_prefetch0(cf->sg);
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ if (is_encode(ses))
+ out_sg->length = sym->auth.data.length + ses->digest_length;
+ else
+ out_sg->length = sym->auth.data.length;
+
+ /* output sg entries */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->length -= ses->digest_length;
+
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ if (is_encode(ses))
+ in_sg->length = ses->iv.length + sym->auth.data.length;
+ else
+ in_sg->length = ses->iv.length + sym->auth.data.length
+ + ses->digest_length;
+
+ /* input sg entries */
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* 1st seg IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 2nd seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ sg->length -= ses->digest_length;
+ if (is_decode(ses)) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
return cf;
}
-static int
-dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
-{
- struct dpaa_sec_job *cf;
- dpaa_sec_session *ses;
- struct qm_fd fd;
- int ret;
- uint32_t auth_only_len = op->sym->auth.data.length -
- op->sym->cipher.data.length;
-
- if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
- ses = (dpaa_sec_session *)get_session_private_data(
- op->sym->session, cryptodev_driver_id);
- else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
- ses = (dpaa_sec_session *)get_sec_session_private_data(
- op->sym->sec_session);
- else
- return -ENOTSUP;
-
- if (unlikely(!ses->qp || ses->qp != qp)) {
- PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
- if (dpaa_sec_attach_sess_q(qp, ses))
- return -1;
- }
-
- /*
- * Segmented buffer is not supported.
- */
- if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return -ENOTSUP;
- }
- if (is_auth_only(ses)) {
- cf = build_auth_only(op, ses);
- } else if (is_cipher_only(ses)) {
- cf = build_cipher_only(op, ses);
- } else if (is_aead(ses)) {
- cf = build_cipher_auth_gcm(op, ses);
- auth_only_len = ses->auth_only_len;
- } else if (is_auth_cipher(ses)) {
- cf = build_cipher_auth(op, ses);
- } else if (is_proto_ipsec(ses)) {
- cf = build_proto(op, ses);
- } else {
- PMD_TX_LOG(ERR, "not supported sec op");
- return -ENOTSUP;
- }
- if (unlikely(!cf))
- return -ENOMEM;
-
- memset(&fd, 0, sizeof(struct qm_fd));
- qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
- fd._format1 = qm_fd_compound;
- fd.length29 = 2 * sizeof(struct qm_sg_entry);
- /* Auth_only_len is set as 0 in descriptor and it is overwritten
- * here in the fd.cmd which will update the DPOVRD reg.
- */
- if (auth_only_len)
- fd.cmd = 0x80000000 | auth_only_len;
- do {
- ret = qman_enqueue(ses->inq, &fd, 0);
- } while (ret != 0);
-
- return 0;
-}
-
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
/* Function to transmit the frames to given device and queuepair */
uint32_t loop;
- int32_t ret;
struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
uint16_t num_tx = 0;
+ struct qm_fd fds[DPAA_SEC_BURST], *fd;
+ uint32_t frames_to_send;
+ struct rte_crypto_op *op;
+ struct dpaa_sec_job *cf;
+ dpaa_sec_session *ses;
+ uint32_t auth_only_len;
+ struct qman_fq *inq[DPAA_SEC_BURST];
+
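+	/* Prepare up to DPAA_SEC_BURST FDs at a time and push them with
+	 * a single multi-FQ enqueue; on a per-op failure the batch is
+	 * truncated to what was built so far.
+	 */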
+ while (nb_ops) {
+ frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+ DPAA_SEC_BURST : nb_ops;
+ for (loop = 0; loop < frames_to_send; loop++) {
+ op = *(ops++);
+ switch (op->sess_type) {
+ case RTE_CRYPTO_OP_WITH_SESSION:
+ ses = (dpaa_sec_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ break;
+ case RTE_CRYPTO_OP_SECURITY_SESSION:
+ ses = (dpaa_sec_session *)
+ get_sec_session_private_data(
+ op->sym->sec_session);
+ break;
+ default:
+ DPAA_SEC_DP_ERR(
+ "sessionless crypto op not supported");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ if (unlikely(!ses->qp || ses->qp != qp)) {
+ DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
+ ses->qp, qp);
+ if (dpaa_sec_attach_sess_q(qp, ses)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
- if (unlikely(nb_ops == 0))
- return 0;
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+ if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ if (is_auth_only(ses)) {
+ cf = build_auth_only(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth(op, ses);
+ } else if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else {
+ DPAA_SEC_DP_ERR("not supported ops");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ } else {
+ if (is_auth_only(ses)) {
+ cf = build_auth_only_sg(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only_sg(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm_sg(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth_sg(op, ses);
+ } else {
+ DPAA_SEC_DP_ERR("not supported ops");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
+ if (unlikely(!cf)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
- /*Prepare each packet which is to be sent*/
- for (loop = 0; loop < nb_ops; loop++) {
- if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- PMD_TX_LOG(ERR, "sessionless crypto op not supported");
- return 0;
+ fd = &fds[loop];
+ inq[loop] = ses->inq;
+ fd->opaque_addr = 0;
+ fd->cmd = 0;
+ qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
+ fd->_format1 = qm_fd_compound;
+ fd->length29 = 2 * sizeof(struct qm_sg_entry);
+			/* auth_only_len is set as 0 in the descriptor
+			 * and is overwritten here in fd.cmd, which
+			 * updates the DPOVRD register.
+			 */
+ if (auth_only_len)
+ fd->cmd = 0x80000000 | auth_only_len;
+
+ }
+send_pkts:
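+	/* qman_enqueue_multi_fq() may accept fewer frames than
+	 * requested; loop until the whole batch is consumed.
+	 */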
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+ frames_to_send - loop);
}
- ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
- if (!ret)
- num_tx++;
+ nb_ops -= frames_to_send;
+ num_tx += frames_to_send;
}
+
dpaa_qp->tx_pkts += num_tx;
dpaa_qp->tx_errs += nb_ops - num_tx;
dpaa_qp->rx_pkts += num_rx;
dpaa_qp->rx_errs += nb_ops - num_rx;
- PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
+ DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
return num_rx;
}
PMD_INIT_FUNC_TRACE();
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
+ DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
- PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
return -EINVAL;
}
struct dpaa_sec_dev_private *internals;
struct dpaa_sec_qp *qp = NULL;
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
- dev, qp_id, qp_conf);
+ DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
- PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
return -EINVAL;
}
return 0;
}
-/** Start queue pair */
-static int
-dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
-/** Stop queue pair */
-static int
-dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
+ DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
session->cipher_key.length = xform->cipher.key.length;
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for auth key\n");
+ DPAA_SEC_ERR("No Memory for auth key");
return -ENOMEM;
}
session->auth_key.length = xform->auth.key.length;
session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for aead key\n");
+		DPAA_SEC_ERR("No Memory for aead key");
return -ENOMEM;
}
session->aead_key.length = xform->aead.key.length;
return &qi->inq[i];
}
}
- PMD_DRV_LOG(ERR, "All ses session in use %x", qi->max_nb_sessions);
+	DPAA_SEC_WARN("All sessions in use %x", qi->max_nb_sessions);
return NULL;
}
sess->qp = qp;
ret = dpaa_sec_prep_cdb(sess);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+ DPAA_SEC_ERR("Unable to prepare sec cdb");
return -1;
}
-
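+	/* The queue init below issues qman calls that need a DPAA
+	 * portal affined to the calling thread; set one up on first
+	 * use.
+	 */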
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_SEC_ERR("Failure in affining portal");
+ return ret;
+ }
+ }
ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
qman_fq_fqid(&qp->outq));
if (ret)
- PMD_DRV_LOG(ERR, "Unable to init sec queue");
+ DPAA_SEC_ERR("Unable to init sec queue");
return ret;
}
-static int
-dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
- uint16_t qp_id __rte_unused,
- void *ses __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-static int
-dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
- uint16_t qp_id __rte_unused,
- void *ses)
-{
- dpaa_sec_session *sess = ses;
- struct dpaa_sec_dev_private *qi = dev->data->dev_private;
-
- PMD_INIT_FUNC_TRACE();
-
- if (sess->inq)
- dpaa_sec_detach_rxq(qi, sess->inq);
- sess->inq = NULL;
-
- sess->qp = NULL;
-
- return 0;
-}
-
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct\n");
+ DPAA_SEC_ERR("invalid session struct");
return -EINVAL;
}
dpaa_sec_cipher_init(dev, xform, session);
dpaa_sec_auth_init(dev, xform->next, session);
} else {
- PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
}
dpaa_sec_auth_init(dev, xform, session);
dpaa_sec_cipher_init(dev, xform->next, session);
} else {
- PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
}
dpaa_sec_aead_init(dev, xform, session);
} else {
- PMD_DRV_LOG(ERR, "Invalid crypto type");
+ DPAA_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
- PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ DPAA_SEC_ERR("unable to attach sec queue");
goto err1;
}
PMD_INIT_FUNC_TRACE();
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
- "session parameters");
+ DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
return -ENOMEM;
}
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
auth_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
auth_xform->algo);
goto out;
}
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
cipher_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
cipher_xform->algo);
goto out;
}
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
- PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ DPAA_SEC_ERR("unable to attach sec queue");
goto out;
}
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
return -EINVAL;
}
if (ret != 0) {
- PMD_DRV_LOG(ERR,
- "DPAA2 PMD: failed to configure session parameters");
-
+ DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
static int
-dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+dpaa_sec_dev_configure(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config __rte_unused)
{
+
+ char str[20];
+ struct dpaa_sec_dev_private *internals;
+
PMD_INIT_FUNC_TRACE();
+ internals = dev->data->dev_private;
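+	/* Create the per-device ctx pool at configure time (it is
+	 * freed in close) so the device can be reconfigured cleanly.
+	 */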
+	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
+ if (!internals->ctx_pool) {
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+			DPAA_SEC_ERR("%s create failed", str);
+ return -ENOMEM;
+ }
+ } else
+ DPAA_SEC_INFO("mempool already created for dev_id : %d",
+ dev->data->dev_id);
+
return 0;
}
}
static int
-dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
+dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
+ struct dpaa_sec_dev_private *internals;
+
PMD_INIT_FUNC_TRACE();
+
+	if (dev == NULL)
+		return -EINVAL;
+
+ internals = dev->data->dev_private;
+ rte_mempool_free(internals->ctx_pool);
+ internals->ctx_pool = NULL;
+
return 0;
}
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa_sec_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->sym.max_nb_sessions_per_qp =
- RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
- RTE_DPAA_MAX_NB_SEC_QPS;
info->driver_id = cryptodev_driver_id;
}
}
.dev_infos_get = dpaa_sec_dev_infos_get,
.queue_pair_setup = dpaa_sec_queue_pair_setup,
.queue_pair_release = dpaa_sec_queue_pair_release,
- .queue_pair_start = dpaa_sec_queue_pair_start,
- .queue_pair_stop = dpaa_sec_queue_pair_stop,
.queue_pair_count = dpaa_sec_queue_pair_count,
.session_get_size = dpaa_sec_session_get_size,
.session_configure = dpaa_sec_session_configure,
- .session_clear = dpaa_sec_session_clear,
- .qp_attach_session = dpaa_sec_qp_attach_sess,
- .qp_detach_session = dpaa_sec_qp_detach_sess,
+ .session_clear = dpaa_sec_session_clear
};
static const struct rte_security_capability *
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
- struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ struct dpaa_sec_dev_private *internals;
if (dev == NULL)
return -ENODEV;
+ internals = dev->data->dev_private;
rte_free(dev->security_ctx);
+ /* In case close has been called, internals->ctx_pool would be NULL */
rte_mempool_free(internals->ctx_pool);
rte_free(internals);
- PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
- dev->data->name, rte_socket_id());
+ DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
return 0;
}
struct dpaa_sec_qp *qp;
uint32_t i, flags;
int ret;
- char str[20];
PMD_INIT_FUNC_TRACE();
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_SECURITY;
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
internals = cryptodev->data->dev_private;
internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ DPAA_SEC_WARN("Device already init by primary process");
return 0;
}
qp = &internals->qps[i];
ret = dpaa_sec_init_tx(&qp->outq);
if (ret) {
- PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
+ DPAA_SEC_ERR("config tx of queue pair %d", i);
goto init_error;
}
}
/* create rx qman fq for sessions*/
ret = qman_create_fq(0, flags, &internals->inq[i]);
if (unlikely(ret != 0)) {
- PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
+ DPAA_SEC_ERR("sec qman_create_fq failed");
goto init_error;
}
}
- sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
- internals->ctx_pool = rte_mempool_create((const char *)str,
- CTX_POOL_NUM_BUFS,
- CTX_POOL_BUF_SIZE,
- CTX_POOL_CACHE_SIZE, 0,
- NULL, NULL, NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (!internals->ctx_pool) {
- RTE_LOG(ERR, PMD, "%s create failed\n", str);
- goto init_error;
- }
-
- PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
return 0;
init_error:
- PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
dpaa_sec_uninit(cryptodev);
return -EFAULT;
static struct cryptodev_driver dpaa_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
cryptodev_driver_id);
+
+RTE_INIT(dpaa_sec_init_log);
+static void
+dpaa_sec_init_log(void)
+{
+ dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
+ if (dpaa_logtype_sec >= 0)
+ rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
+}