From 8d928d47a29a8a8fa33ca94bd34391f892f831ef Mon Sep 17 00:00:00 2001 From: Fan Zhang Date: Sun, 11 Oct 2020 01:38:51 +0100 Subject: [PATCH] cryptodev: change crypto symmetric vector structure This patch updates ``rte_crypto_sym_vec`` structure to add support for both cpu_crypto synchronous operation and asynchronous raw data-path APIs. The patch also includes AESNI-MB and AESNI-GCM PMD changes, unit test changes and documentation updates. Signed-off-by: Fan Zhang Acked-by: Konstantin Ananyev Acked-by: Akhil Goyal --- app/test/test_cryptodev.c | 25 ++++++++------ doc/guides/prog_guide/cryptodev_lib.rst | 3 +- doc/guides/rel_notes/release_20_11.rst | 3 ++ drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 18 +++++----- drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 9 +++-- lib/librte_cryptodev/rte_crypto_sym.h | 40 ++++++++++++++++------ lib/librte_ipsec/esp_inb.c | 12 +++---- lib/librte_ipsec/esp_outb.c | 12 +++---- lib/librte_ipsec/misc.h | 6 ++-- 9 files changed, 79 insertions(+), 49 deletions(-) diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index 6da199dfb6..5b6231bdae 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -151,11 +151,11 @@ static void process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op) { int32_t n, st; - void *iv; struct rte_crypto_sym_op *sop; union rte_crypto_sym_ofs ofs; struct rte_crypto_sgl sgl; struct rte_crypto_sym_vec symvec; + struct rte_crypto_va_iova_ptr iv_ptr, aad_ptr, digest_ptr; struct rte_crypto_vec vec[UINT8_MAX]; sop = op->sym; @@ -171,13 +171,17 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op) sgl.vec = vec; sgl.num = n; symvec.sgl = &sgl; - iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); - symvec.iv = &iv; - symvec.aad = (void **)&sop->aead.aad.data; - symvec.digest = (void **)&sop->aead.digest.data; + symvec.iv = &iv_ptr; + symvec.digest = &digest_ptr; + symvec.aad = &aad_ptr; symvec.status = &st; symvec.num = 1; + /* for CPU crypto the IOVA address is not 
required */ + iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); + digest_ptr.va = (void *)sop->aead.digest.data; + aad_ptr.va = (void *)sop->aead.aad.data; + ofs.raw = 0; n = rte_cryptodev_sym_cpu_crypto_process(dev_id, sop->session, ofs, @@ -193,11 +197,11 @@ static void process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op) { int32_t n, st; - void *iv; struct rte_crypto_sym_op *sop; union rte_crypto_sym_ofs ofs; struct rte_crypto_sgl sgl; struct rte_crypto_sym_vec symvec; + struct rte_crypto_va_iova_ptr iv_ptr, digest_ptr; struct rte_crypto_vec vec[UINT8_MAX]; sop = op->sym; @@ -213,13 +217,14 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op) sgl.vec = vec; sgl.num = n; symvec.sgl = &sgl; - iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); - symvec.iv = &iv; - symvec.aad = (void **)&sop->aead.aad.data; - symvec.digest = (void **)&sop->auth.digest.data; + symvec.iv = &iv_ptr; + symvec.digest = &digest_ptr; symvec.status = &st; symvec.num = 1; + iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); + digest_ptr.va = (void *)sop->auth.digest.data; + ofs.raw = 0; ofs.ofs.cipher.head = sop->cipher.data.offset - sop->auth.data.offset; ofs.ofs.cipher.tail = (sop->auth.data.offset + sop->auth.data.length) - diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst index c14f750fa8..e7ba35c2d2 100644 --- a/doc/guides/prog_guide/cryptodev_lib.rst +++ b/doc/guides/prog_guide/cryptodev_lib.rst @@ -620,7 +620,8 @@ operation descriptor (``struct rte_crypto_sym_vec``) containing: descriptors of performed operations (``struct rte_crypto_sgl``). 
Each instance of ``struct rte_crypto_sgl`` consists of a number of segments and a pointer to an array of segment descriptors ``struct rte_crypto_vec``; -- pointers to arrays of size ``num`` containing IV, AAD and digest information, +- pointers to arrays of size ``num`` containing IV, AAD and digest information + in the ``cpu_crypto`` sub-structure, - pointer to an array of size ``num`` where status information will be stored for each operation. diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst index c07eadcbcc..c382cd0885 100644 --- a/doc/guides/rel_notes/release_20_11.rst +++ b/doc/guides/rel_notes/release_20_11.rst @@ -345,6 +345,9 @@ API Changes * vhost: Moved vDPA APIs from experimental to stable. +* cryptodev: The structure ``rte_crypto_sym_vec`` is updated to support both + cpu_crypto synchronous operation and asynchronous raw data-path APIs. + * scheduler: Renamed functions ``rte_cryptodev_scheduler_slave_attach``, ``rte_cryptodev_scheduler_slave_detach`` and ``rte_cryptodev_scheduler_slaves_get`` to diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c index aea599ebf3..0de51202a6 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c @@ -535,9 +535,10 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s, processed = 0; for (i = 0; i < vec->num; ++i) { aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, - &vec->sgl[i], vec->iv[i], vec->aad[i]); + &vec->sgl[i], vec->iv[i].va, + vec->aad[i].va); vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s, - gdata_ctx, vec->digest[i]); + gdata_ctx, vec->digest[i].va); processed += (vec->status[i] == 0); } @@ -553,9 +554,10 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s, processed = 0; for (i = 0; i < vec->num; ++i) { aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, - &vec->sgl[i], vec->iv[i], vec->aad[i]); + &vec->sgl[i], vec->iv[i].va, + vec->aad[i].va); vec->status[i] = 
aesni_gcm_sgl_op_finalize_decryption(s, - gdata_ctx, vec->digest[i]); + gdata_ctx, vec->digest[i].va); processed += (vec->status[i] == 0); } @@ -576,9 +578,9 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s, } aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, - &vec->sgl[i], vec->iv[i]); + &vec->sgl[i], vec->iv[i].va); vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s, - gdata_ctx, vec->digest[i]); + gdata_ctx, vec->digest[i].va); processed += (vec->status[i] == 0); } @@ -599,9 +601,9 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s, } aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, - &vec->sgl[i], vec->iv[i]); + &vec->sgl[i], vec->iv[i].va); vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s, - gdata_ctx, vec->digest[i]); + gdata_ctx, vec->digest[i].va); processed += (vec->status[i] == 0); } diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c index db09109159..fbbb38af02 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c @@ -1931,7 +1931,7 @@ generate_sync_dgst(struct rte_crypto_sym_vec *vec, for (i = 0, k = 0; i != vec->num; i++) { if (vec->status[i] == 0) { - memcpy(vec->digest[i], dgst[i], len); + memcpy(vec->digest[i].va, dgst[i], len); k++; } } @@ -1947,7 +1947,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec, for (i = 0, k = 0; i != vec->num; i++) { if (vec->status[i] == 0) { - if (memcmp(vec->digest[i], dgst[i], len) != 0) + if (memcmp(vec->digest[i].va, dgst[i], len) != 0) vec->status[i] = EBADMSG; else k++; @@ -2010,9 +2010,8 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev, } /* Submit job for processing */ - set_cpu_mb_job_params(job, s, sofs, buf, len, - vec->iv[i], vec->aad[i], tmp_dgst[i], - &vec->status[i]); + set_cpu_mb_job_params(job, s, sofs, buf, len, vec->iv[i].va, + vec->aad[i].va, tmp_dgst[i], &vec->status[i]); job = submit_sync_job(mb_mgr); j++; diff --git a/lib/librte_cryptodev/rte_crypto_sym.h 
b/lib/librte_cryptodev/rte_crypto_sym.h index f29c98051f..e1f23d303f 100644 --- a/lib/librte_cryptodev/rte_crypto_sym.h +++ b/lib/librte_cryptodev/rte_crypto_sym.h @@ -51,26 +51,44 @@ struct rte_crypto_sgl { }; /** - * Synchronous operation descriptor. - * Supposed to be used with CPU crypto API call. + * Crypto virtual and IOVA address descriptor, used to describe cryptographic + * data buffer without the length information. The length information is + * normally predefined during session creation. + */ +struct rte_crypto_va_iova_ptr { + void *va; + rte_iova_t iova; +}; + +/** + * Raw data operation descriptor. + * Supposed to be used with synchronous CPU crypto API call or asynchronous + * RAW data path API call. */ struct rte_crypto_sym_vec { + /** number of operations to perform */ + uint32_t num; /** array of SGL vectors */ struct rte_crypto_sgl *sgl; - /** array of pointers to IV */ - void **iv; - /** array of pointers to AAD */ - void **aad; + /** array of pointers to cipher IV */ + struct rte_crypto_va_iova_ptr *iv; /** array of pointers to digest */ - void **digest; + struct rte_crypto_va_iova_ptr *digest; + + __extension__ + union { + /** array of pointers to auth IV, used for chain operation */ + struct rte_crypto_va_iova_ptr *auth_iv; + /** array of pointers to AAD, used for AEAD operation */ + struct rte_crypto_va_iova_ptr *aad; + }; + /** * array of statuses for each operation: - * - 0 on success - * - errno on error + * - 0 on success + * - errno on error */ int32_t *status; - /** number of operations to perform */ - uint32_t num; }; /** diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c index 96eec0131f..2b1df6a032 100644 --- a/lib/librte_ipsec/esp_inb.c +++ b/lib/librte_ipsec/esp_inb.c @@ -693,9 +693,9 @@ cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa; struct replay_sqn *rsn; union sym_op_data icv; - void *iv[num]; - void *aad[num]; - void *dgst[num]; + struct rte_crypto_va_iova_ptr iv[num]; + 
struct rte_crypto_va_iova_ptr aad[num]; + struct rte_crypto_va_iova_ptr dgst[num]; uint32_t dr[num]; uint32_t l4ofs[num]; uint32_t clen[num]; @@ -720,9 +720,9 @@ cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss, l4ofs + k, rc, ivbuf[k]); /* fill iv, digest and aad */ - iv[k] = ivbuf[k]; - aad[k] = icv.va + sa->icv_len; - dgst[k++] = icv.va; + iv[k].va = ivbuf[k]; + aad[k].va = icv.va + sa->icv_len; + dgst[k++].va = icv.va; } else { dr[i - k] = i; rte_errno = -rc; diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c index fb9d5864c8..1e181cf2ce 100644 --- a/lib/librte_ipsec/esp_outb.c +++ b/lib/librte_ipsec/esp_outb.c @@ -449,9 +449,9 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss, uint32_t i, k, n; uint32_t l2, l3; union sym_op_data icv; - void *iv[num]; - void *aad[num]; - void *dgst[num]; + struct rte_crypto_va_iova_ptr iv[num]; + struct rte_crypto_va_iova_ptr aad[num]; + struct rte_crypto_va_iova_ptr dgst[num]; uint32_t dr[num]; uint32_t l4ofs[num]; uint32_t clen[num]; @@ -488,9 +488,9 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss, ivbuf[k]); /* fill iv, digest and aad */ - iv[k] = ivbuf[k]; - aad[k] = icv.va + sa->icv_len; - dgst[k++] = icv.va; + iv[k].va = ivbuf[k]; + aad[k].va = icv.va + sa->icv_len; + dgst[k++].va = icv.va; } else { dr[i - k] = i; rte_errno = -rc; diff --git a/lib/librte_ipsec/misc.h b/lib/librte_ipsec/misc.h index 1b543ed875..79b9a20762 100644 --- a/lib/librte_ipsec/misc.h +++ b/lib/librte_ipsec/misc.h @@ -112,7 +112,9 @@ mbuf_cut_seg_ofs(struct rte_mbuf *mb, struct rte_mbuf *ms, uint32_t ofs, static inline void cpu_crypto_bulk(const struct rte_ipsec_session *ss, union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[], - void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[], + struct rte_crypto_va_iova_ptr iv[], + struct rte_crypto_va_iova_ptr aad[], + struct rte_crypto_va_iova_ptr dgst[], uint32_t l4ofs[], uint32_t clen[], uint32_t num) { uint32_t i, j, n; @@ -136,8 +138,8 @@ 
cpu_crypto_bulk(const struct rte_ipsec_session *ss, /* fill the request structure */ symvec.sgl = &vecpkt[j]; symvec.iv = &iv[j]; - symvec.aad = &aad[j]; symvec.digest = &dgst[j]; + symvec.aad = &aad[j]; symvec.status = &st[j]; symvec.num = i - j; -- 2.20.1