From: Fan Zhang
Date: Mon, 25 Mar 2019 13:51:20 +0000 (+0000)
Subject: crypto/aesni_mb: enable out of place processing
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=2d0c29a37a9c080c1cccb1ad7941aba2ccf5437e;p=dpdk.git

crypto/aesni_mb: enable out of place processing

Add out-of-place processing, i.e. support for different source and
destination mbufs, plus the related capability update, tests and
documentation.

Signed-off-by: Fiona Trahe
Signed-off-by: Paul Luse
Signed-off-by: Fan Zhang
Acked-by: Fiona Trahe
Acked-by: Pablo de Lara
---

diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 47f2ecc2f6..55aa7cd948 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -56,7 +56,6 @@ Limitations
 -----------
 
 * Chained mbufs are not supported.
-* Only in-place is currently supported (destination address is the same as source address).
 * RTE_CRYPTO_AEAD_AES_GCM only works properly when the multi-buffer library is 0.51.0
   or newer.
 * RTE_CRYPTO_HASH_AES_GMAC is supported by library version v0.51 or later.
diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
index f72957451f..93a534f19c 100644
--- a/doc/guides/cryptodevs/features/aesni_mb.ini
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -11,6 +11,7 @@ CPU AVX = Y
 CPU AVX2 = Y
 CPU AVX512 = Y
 CPU AESNI = Y
+OOP LB In LB Out = Y
 
 ;
 ; Supported crypto algorithms of the 'aesni_mb' crypto driver.
diff --git a/doc/guides/rel_notes/release_19_05.rst b/doc/guides/rel_notes/release_19_05.rst
index bdad1ddbee..3aad8d6a5f 100644
--- a/doc/guides/rel_notes/release_19_05.rst
+++ b/doc/guides/rel_notes/release_19_05.rst
@@ -111,6 +111,10 @@ New Features
 
   Added support for AES-XTS with 128 and 256 bit AES keys.
 
+* **Updated AESNI-MB PMD.**
+
+  Added support for out-of-place operations.
+
 * **Updated the IPsec library.**
 
   The IPsec library has been updated with AES-CTR and 3DES-CBC cipher algorithms
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 48d6ac0029..8bcfe7939c 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -739,6 +739,56 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
 	return sess;
 }
 
+static inline uint64_t
+auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
+		uint32_t oop)
+{
+	struct rte_mbuf *m_src, *m_dst;
+	uint8_t *p_src, *p_dst;
+	uintptr_t u_src, u_dst;
+	uint32_t cipher_end, auth_end;
+
+	/* Only cipher then hash needs special calculation. */
+	if (!oop || session->chain_order != CIPHER_HASH)
+		return op->sym->auth.data.offset;
+
+	m_src = op->sym->m_src;
+	m_dst = op->sym->m_dst;
+
+	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
+	u_src = (uintptr_t)p_src;
+	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
+
+	/**
+	 * Copy the content between cipher offset and auth offset for generating
+	 * correct digest.
+	 */
+	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
+		memcpy(p_dst + op->sym->auth.data.offset,
+				p_src + op->sym->auth.data.offset,
+				op->sym->cipher.data.offset -
+				op->sym->auth.data.offset);
+
+	/**
+	 * Copy the content between (cipher offset + length) and (auth offset +
+	 * length) for generating correct digest
+	 */
+	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
+	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
+	if (cipher_end < auth_end)
+		memcpy(p_dst + cipher_end, p_src + cipher_end,
+				auth_end - cipher_end);
+
+	/**
+	 * Since intel-ipsec-mb only supports positive values,
+	 * we need to deduct the correct offset between src and dst.
+	 */
+
+	return u_src < u_dst ? (u_dst - u_src) :
+			(UINT64_MAX - u_src + u_dst + 1);
+}
+
 /**
  * Process a crypto operation and complete a JOB_AES_HMAC job structure for
  * submission to the multi buffer library for processing.
@@ -757,7 +807,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 {
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
 	struct aesni_mb_session *session;
-	uint16_t m_offset = 0;
+	uint32_t m_offset, oop;
 
 	session = get_session(qp, op);
 	if (session == NULL) {
@@ -840,31 +890,26 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 		}
 	}
 
-	/* Mutable crypto operation parameters */
-	if (op->sym->m_dst) {
-		m_src = m_dst = op->sym->m_dst;
-
-		/* append space for output data to mbuf */
-		char *odata = rte_pktmbuf_append(m_dst,
-				rte_pktmbuf_data_len(op->sym->m_src));
-		if (odata == NULL) {
-			AESNI_MB_LOG(ERR, "failed to allocate space in destination "
-					"mbuf for source data");
-			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			return -1;
-		}
-
-		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
-				rte_pktmbuf_data_len(op->sym->m_src));
-	} else {
+	if (!op->sym->m_dst) {
+		/* in-place operation */
 		m_dst = m_src;
-		if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
-				session->cipher.mode == GCM))
-			m_offset = op->sym->aead.data.offset;
-		else
-			m_offset = op->sym->cipher.data.offset;
+		oop = 0;
+	} else if (op->sym->m_dst == op->sym->m_src) {
+		/* in-place operation */
+		m_dst = m_src;
+		oop = 0;
+	} else {
+		/* out-of-place operation */
+		m_dst = op->sym->m_dst;
+		oop = 1;
 	}
 
+	if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
+			session->cipher.mode == GCM))
+		m_offset = op->sym->aead.data.offset;
+	else
+		m_offset = op->sym->cipher.data.offset;
+
 	/* Set digest output location */
 	if (job->hash_alg != NULL_HASH &&
 			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
@@ -893,7 +938,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 	/* Set IV parameters */
 	job->iv_len_in_bytes = session->iv.length;
 
-	/* Data Parameter */
+	/* Data Parameters */
 	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
 	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
 
@@ -937,7 +982,8 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 			op->sym->cipher.data.offset;
 		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
 
-		job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+				session, oop);
 		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
 
 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -962,7 +1008,7 @@ static inline void
 generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
 		struct aesni_mb_session *sess)
 {
-	/* No extra copy neeed */
+	/* No extra copy needed */
 	if (likely(sess->auth.req_digest_len ==
 			sess->auth.gen_digest_len))
 		return;
 
@@ -1217,7 +1263,9 @@ cryptodev_aesni_mb_create(const char *name,
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_CPU_AESNI;
+			RTE_CRYPTODEV_FF_CPU_AESNI |
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
 	mb_mgr = alloc_mb_mgr(0);
 	if (mb_mgr == NULL)
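
Note on the hash offset handling in auth_start_offset() above: for an out-of-place cipher-then-hash chain the data to be hashed ends up in the destination buffer, while intel-ipsec-mb measures hash_start_src_offset_in_bytes from job->src and only accepts a non-negative value. The function therefore returns the distance from the source start to (destination start + auth offset), and when the destination sits below the source in memory it encodes that distance as UINT64_MAX - u_src + u_dst + 1, i.e. modulo 2^64, so the unsigned addition inside the library wraps around onto the right address. The self-contained sketch below only illustrates that arithmetic; the helper name oop_hash_offset() and the buffer addresses are made up for the example and are not part of this patch or of the intel-ipsec-mb API.

/*
 * Standalone illustration of the wrap-around offset used by
 * auth_start_offset(); not part of the patch.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
oop_hash_offset(uint64_t u_src, uint64_t u_dst)
{
	/* Same expression as the patch: when the destination (plus auth
	 * offset) lies below the source, return the modulo-2^64 distance
	 * so an unsigned addition to the source address wraps onto it. */
	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
}

int
main(void)
{
	uint64_t p_src = 0x7f0000002000;   /* hypothetical source data address */
	uint64_t p_dst = 0x7f0000001000;   /* hypothetical destination, below source */
	uint64_t auth_offset = 16;         /* stands in for op->sym->auth.data.offset */

	uint64_t off = oop_hash_offset(p_src, p_dst + auth_offset);

	/* Prints "hash start = 0x7f0000001010", i.e. p_dst + auth_offset. */
	printf("hash start = 0x%" PRIx64 "\n", p_src + off);
	return 0;
}

In other words, the PMD never hands the library a negative offset; a destination that lies behind the source is simply expressed as its two's-complement, and the modulo-2^64 addition lands the hash start exactly on the destination buffer.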