From: Nagadheeraj Rottela
Date: Fri, 9 Oct 2020 05:57:25 +0000 (+0530)
Subject: crypto/nitrox: support cipher-only operations
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=678f3eca1dfd6257023e5f433b17afdb8d662382;p=dpdk.git

crypto/nitrox: support cipher-only operations

This patch adds support for cipher-only crypto operations.

Signed-off-by: Nagadheeraj Rottela
---

diff --git a/doc/guides/cryptodevs/nitrox.rst b/doc/guides/cryptodevs/nitrox.rst
index 91fca905a4..095e545c6a 100644
--- a/doc/guides/cryptodevs/nitrox.rst
+++ b/doc/guides/cryptodevs/nitrox.rst
@@ -33,8 +33,6 @@ Supported AEAD algorithms:
 Limitations
 -----------
 
-* AES_CBC Cipher Only combination is not supported.
-* 3DES Cipher Only combination is not supported.
 * Session-less APIs are not supported.
 
 Installation
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index f944db20aa..d400cee3e8 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -159,6 +159,7 @@ New Features
 * **Updated Marvell NITROX symmetric crypto PMD.**
 
   * Added AES-GCM support.
+  * Added cipher only offload support.
 
 * **Updated the OCTEON TX2 crypto PMD.**
diff --git a/drivers/crypto/nitrox/nitrox_sym.c b/drivers/crypto/nitrox/nitrox_sym.c
index fe3ee6e239..2768bdd2e1 100644
--- a/drivers/crypto/nitrox/nitrox_sym.c
+++ b/drivers/crypto/nitrox/nitrox_sym.c
@@ -550,6 +550,9 @@ nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
 	ctx = mp_obj;
 	ctx->nitrox_chain = get_crypto_chain_order(xform);
 	switch (ctx->nitrox_chain) {
+	case NITROX_CHAIN_CIPHER_ONLY:
+		cipher_xform = &xform->cipher;
+		break;
 	case NITROX_CHAIN_CIPHER_AUTH:
 		cipher_xform = &xform->cipher;
 		auth_xform = &xform->next->auth;
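With this change, a transform chain consisting of a single cipher xform (xform->next == NULL) is the case that get_crypto_chain_order() maps to NITROX_CHAIN_CIPHER_ONLY. For reference, the application-side setup for such a session against the DPDK 20.11 symmetric crypto API looks roughly like the sketch below; the helper name and the AES-128-CBC key/IV sizes are illustrative, not part of the patch:

    #include <string.h>
    #include <rte_crypto.h>

    /* Per-op IV is carried in the op's private area, right after the sym op. */
    #define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

    static struct rte_crypto_sym_xform
    cipher_only_xform(const uint8_t *key)
    {
            struct rte_crypto_sym_xform xform;

            memset(&xform, 0, sizeof(xform));
            xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
            xform.next = NULL;                  /* no auth stage: cipher-only chain */
            xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
            xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
            xform.cipher.key.data = key;
            xform.cipher.key.length = 16;       /* AES-128 */
            xform.cipher.iv.offset = IV_OFFSET;
            xform.cipher.iv.length = 16;        /* AES block size */
            return xform;
    }

The session itself is then created and attached with the usual two-step 20.11 API, rte_cryptodev_sym_session_create() followed by rte_cryptodev_sym_session_init(), exactly as for chained transforms.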
diff --git a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
index 113ce5d11b..fe3ca25a0c 100644
--- a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
+++ b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
@@ -247,38 +247,6 @@ softreq_copy_iv(struct nitrox_softreq *sr, uint8_t salt_size)
 	sr->iv.len = sr->ctx->iv.length - salt_size;
 }
 
-static int
-extract_cipher_auth_digest(struct nitrox_softreq *sr,
-			   struct nitrox_sglist *digest)
-{
-	struct rte_crypto_op *op = sr->op;
-	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
-					op->sym->m_src;
-
-	if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
-	    unlikely(!op->sym->auth.digest.data))
-		return -EINVAL;
-
-	digest->len = sr->ctx->digest_length;
-	if (op->sym->auth.digest.data) {
-		digest->iova = op->sym->auth.digest.phys_addr;
-		digest->virt = op->sym->auth.digest.data;
-		return 0;
-	}
-
-	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
-		     op->sym->auth.data.length + digest->len))
-		return -EINVAL;
-
-	digest->iova = rte_pktmbuf_iova_offset(mdst,
-					       op->sym->auth.data.offset +
-					       op->sym->auth.data.length);
-	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
-					       op->sym->auth.data.offset +
-					       op->sym->auth.data.length);
-	return 0;
-}
-
 static void
 fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
 	    void *virt)
@@ -340,6 +308,143 @@ create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
 	return 0;
 }
 
+static void
+create_sgcomp(struct nitrox_sgtable *sgtbl)
+{
+	int i, j, nr_sgcomp;
+	struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
+	struct nitrox_sglist *sglist = sgtbl->sglist;
+
+	nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
+	sgtbl->nr_sgcomp = nr_sgcomp;
+	for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
+		for (j = 0; j < 4; j++, sglist++) {
+			sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
+			sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
+		}
+	}
+}
+
+static int
+create_cipher_inbuf(struct nitrox_softreq *sr)
+{
+	int err;
+	struct rte_crypto_op *op = sr->op;
+
+	fill_sglist(&sr->in, sr->iv.len, sr->iv.iova, sr->iv.virt);
+	err = create_sglist_from_mbuf(&sr->in, op->sym->m_src,
+				      op->sym->cipher.data.offset,
+				      op->sym->cipher.data.length);
+	if (unlikely(err))
+		return err;
+
+	create_sgcomp(&sr->in);
+	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
+
+	return 0;
+}
+
+static int
+create_cipher_outbuf(struct nitrox_softreq *sr)
+{
+	struct rte_crypto_op *op = sr->op;
+	int err, cnt = 0;
+	struct rte_mbuf *m_dst = op->sym->m_dst ? op->sym->m_dst :
+					op->sym->m_src;
+
+	sr->resp.orh = PENDING_SIG;
+	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
+	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
+						       resp.orh);
+	sr->out.sglist[cnt].virt = &sr->resp.orh;
+	cnt++;
+
+	sr->out.map_bufs_cnt = cnt;
+	fill_sglist(&sr->out, sr->iv.len, sr->iv.iova, sr->iv.virt);
+	err = create_sglist_from_mbuf(&sr->out, m_dst,
+				      op->sym->cipher.data.offset,
+				      op->sym->cipher.data.length);
+	if (unlikely(err))
+		return err;
+
+	cnt = sr->out.map_bufs_cnt;
+	sr->resp.completion = PENDING_SIG;
+	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
+	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
+						       resp.completion);
+	sr->out.sglist[cnt].virt = &sr->resp.completion;
+	cnt++;
+
+	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
+	sr->out.map_bufs_cnt = cnt;
+
+	create_sgcomp(&sr->out);
+	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
+
+	return 0;
+}
+
+static void
+create_cipher_gph(uint32_t cryptlen, uint16_t ivlen, struct gphdr *gph)
+{
+	gph->param0 = rte_cpu_to_be_16(cryptlen);
+	gph->param1 = 0;
+	gph->param2 = rte_cpu_to_be_16(ivlen);
+	gph->param3 = 0;
+}
+
+static int
+process_cipher_data(struct nitrox_softreq *sr)
+{
+	struct rte_crypto_op *op = sr->op;
+	int err;
+
+	softreq_copy_iv(sr, 0);
+	err = create_cipher_inbuf(sr);
+	if (unlikely(err))
+		return err;
+
+	err = create_cipher_outbuf(sr);
+	if (unlikely(err))
+		return err;
+
+	create_cipher_gph(op->sym->cipher.data.length, sr->iv.len, &sr->gph);
+
+	return 0;
+}
+
+static int
+extract_cipher_auth_digest(struct nitrox_softreq *sr,
+			   struct nitrox_sglist *digest)
+{
+	struct rte_crypto_op *op = sr->op;
+	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
+					op->sym->m_src;
+
+	if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
+	    unlikely(!op->sym->auth.digest.data))
+		return -EINVAL;
+
+	digest->len = sr->ctx->digest_length;
+	if (op->sym->auth.digest.data) {
+		digest->iova = op->sym->auth.digest.phys_addr;
+		digest->virt = op->sym->auth.digest.data;
+		return 0;
+	}
+
+	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
+		     op->sym->auth.data.length + digest->len))
+		return -EINVAL;
+
+	digest->iova = rte_pktmbuf_iova_offset(mdst,
+					       op->sym->auth.data.offset +
+					       op->sym->auth.data.length);
+	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
+					       op->sym->auth.data.offset +
+					       op->sym->auth.data.length);
+	return 0;
+}
+
 static int
 create_cipher_auth_sglist(struct nitrox_softreq *sr,
 			  struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
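Note that create_sgcomp() moves above its new callers; it packs the flat sglist into the hardware's scatter-gather components, four entries per component, with lengths and addresses converted to big-endian, and the component count rounds up to cover a partial final group of four. A standalone sketch of just that rounding, with the RTE_ALIGN_MUL_CEIL() expression expanded by hand:

    #include <stdio.h>

    /* Components needed for n mapped buffers, 4 entries per component;
     * equivalent to RTE_ALIGN_MUL_CEIL(n, 4) / 4 in the driver. */
    static int
    nr_sgcomp(int n)
    {
            return (n + 3) / 4;
    }

    int
    main(void)
    {
            /* A cipher-only output list is ORH + IV + data + completion,
             * so at least four entries and at least one component. */
            for (int n = 1; n <= 9; n++)
                    printf("%d sglist entries -> %d sgcomp\n", n, nr_sgcomp(n));
            return 0;
    }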
@@ -408,23 +513,6 @@ create_aead_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
 	return err;
 }
 
-static void
-create_sgcomp(struct nitrox_sgtable *sgtbl)
-{
-	int i, j, nr_sgcomp;
-	struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
-	struct nitrox_sglist *sglist = sgtbl->sglist;
-
-	nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
-	sgtbl->nr_sgcomp = nr_sgcomp;
-	for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
-		for (j = 0; j < 4; j++, sglist++) {
-			sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
-			sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
-		}
-	}
-}
-
 static int
 create_aead_inbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
 {
@@ -661,6 +749,9 @@ process_softreq(struct nitrox_softreq *sr)
 	int err = 0;
 
 	switch (ctx->nitrox_chain) {
+	case NITROX_CHAIN_CIPHER_ONLY:
+		err = process_cipher_data(sr);
+		break;
 	case NITROX_CHAIN_CIPHER_AUTH:
 	case NITROX_CHAIN_AUTH_CIPHER:
 		err = process_cipher_auth_data(sr);
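End to end, the flow this patch enables looks roughly as follows; a condensed sketch against the 20.11 cryptodev API, in which device and queue-pair setup, mempool creation, session init with the cipher-only xform above, and op freeing are omitted, and cdev_id, qp_id and the pools are assumed to exist:

    #include <string.h>
    #include <rte_cryptodev.h>
    #include <rte_mbuf.h>

    #define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

    /* Encrypt one mbuf's payload in place through a cipher-only session. */
    static int
    encrypt_one(uint8_t cdev_id, uint16_t qp_id,
                struct rte_cryptodev_sym_session *sess,
                struct rte_mempool *op_pool, struct rte_mbuf *m,
                const uint8_t iv[16], uint32_t len)
    {
            struct rte_crypto_op *op;

            op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
            if (op == NULL)
                    return -1;

            op->sym->m_src = m;                     /* m_dst left NULL: in-place */
            op->sym->cipher.data.offset = 0;
            op->sym->cipher.data.length = len;      /* block-size multiple for CBC */

            /* Copy the IV to the offset announced in xform.cipher.iv.offset. */
            memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET), iv, 16);
            rte_crypto_op_attach_sym_session(op, sess);

            if (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1)
                    return -1;
            while (rte_cryptodev_dequeue_burst(cdev_id, qp_id, &op, 1) == 0)
                    ;
            return op->status == RTE_CRYPTO_OP_STATUS_SUCCESS ? 0 : -1;
    }

On completion, the ORH error code and completion marker that create_cipher_outbuf() planted at the head and tail of the output gather list are what the driver polls and translates into the op status read above.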