diff --git a/lib/ipsec/crypto.h b/lib/ipsec/crypto.h
index 3d03034590..93d20aaaa0 100644
--- a/lib/ipsec/crypto.h
+++ b/lib/ipsec/crypto.h
@@ -21,6 +21,37 @@ struct aesctr_cnt_blk {
 	uint32_t cnt;
 } __rte_packed;
 
+/*
+ * CHACHA20-POLY1305 devices have some specific requirements
+ * for IV and AAD formats.
+ * Ideally this would be done by the driver itself.
+ */
+
+struct aead_chacha20_poly1305_iv {
+	uint32_t salt;
+	uint64_t iv;
+	uint32_t cnt;
+} __rte_packed;
+
+struct aead_chacha20_poly1305_aad {
+	uint32_t spi;
+	/*
+	 * RFC 7634, section 2.1:
+	 * Two formats of the AAD are defined:
+	 * one for 32-bit sequence numbers, and one for 64-bit ESN.
+	 */
+	union {
+		uint32_t u32[2];
+		uint64_t u64;
+	} sqn;
+	uint32_t align0; /* align to 16B boundary */
+} __rte_packed;
+
+struct chacha20_poly1305_esph_iv {
+	struct rte_esp_hdr esph;
+	uint64_t iv;
+} __rte_packed;
+
 /*
  * AES-GCM devices have some specific requirements for IV and AAD formats.
  * Ideally that to be done by the driver itself.
@@ -51,6 +82,47 @@ struct gcm_esph_iv {
 	uint64_t iv;
 } __rte_packed;
 
+/*
+ * AES-CCM devices have some specific requirements for IV and AAD formats.
+ * Ideally this would be done by the driver itself.
+ */
+union aead_ccm_salt {
+	uint32_t salt;
+	struct inner {
+		uint8_t salt8[3];
+		uint8_t ccm_flags;
+	} inner;
+} __rte_packed;
+
+
+struct aead_ccm_iv {
+	uint8_t ccm_flags;
+	uint8_t salt[3];
+	uint64_t iv;
+	uint32_t cnt;
+} __rte_packed;
+
+struct aead_ccm_aad {
+	uint8_t padding[18];
+	uint32_t spi;
+	/*
+	 * RFC 4309, section 5:
+	 * Two formats of the AAD are defined:
+	 * one for 32-bit sequence numbers, and one for 64-bit ESN.
+	 */
+	union {
+		uint32_t u32[2];
+		uint64_t u64;
+	} sqn;
+	uint32_t align0; /* align to 16B boundary */
+} __rte_packed;
+
+struct ccm_esph_iv {
+	struct rte_esp_hdr esph;
+	uint64_t iv;
+} __rte_packed;
+
+
 static inline void
 aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)
 {
@@ -59,6 +131,16 @@ aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)
 	ctr->cnt = rte_cpu_to_be_32(1);
 }
 
+static inline void
+aead_chacha20_poly1305_iv_fill(struct aead_chacha20_poly1305_iv
+			       *chacha20_poly1305,
+			       uint64_t iv, uint32_t salt)
+{
+	chacha20_poly1305->salt = salt;
+	chacha20_poly1305->iv = iv;
+	chacha20_poly1305->cnt = rte_cpu_to_be_32(1);
+}
+
 static inline void
 aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
 {
@@ -67,6 +149,21 @@ aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
 	gcm->cnt = rte_cpu_to_be_32(1);
 }
 
+static inline void
+aead_ccm_iv_fill(struct aead_ccm_iv *ccm, uint64_t iv, uint32_t salt)
+{
+	union aead_ccm_salt tsalt;
+
+	tsalt.salt = salt;
+	ccm->ccm_flags = tsalt.inner.ccm_flags;
+	ccm->salt[0] = tsalt.inner.salt8[0];
+	ccm->salt[1] = tsalt.inner.salt8[1];
+	ccm->salt[2] = tsalt.inner.salt8[2];
+	ccm->iv = iv;
+	ccm->cnt = rte_cpu_to_be_32(1);
+}
+
+
 /*
  * RFC 4106, 5 AAD Construction
  * spi and sqn should already be converted into network byte order.
 * Make sure that not used bytes are zeroed.
 */
@@ -86,6 +183,25 @@ aead_gcm_aad_fill(struct aead_gcm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
 	aad->align0 = 0;
 }
 
+/*
+ * RFC 4309, 5 AAD Construction
+ * spi and sqn should already be converted into network byte order.
+ * Make sure that unused bytes are zeroed.
+ */
+static inline void
+aead_ccm_aad_fill(struct aead_ccm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
+	int esn)
+{
+	aad->spi = spi;
+	if (esn)
+		aad->sqn.u64 = sqn;
+	else {
+		aad->sqn.u32[0] = sqn_low32(sqn);
+		aad->sqn.u32[1] = 0;
+	}
+	aad->align0 = 0;
+}
+
 static inline void
 gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
 {
@@ -93,6 +209,27 @@ gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
 	iv[1] = 0;
 }
 
+
+/*
+ * RFC 7634, 2.1 AAD Construction
+ * spi and sqn should already be converted into network byte order.
+ * Make sure that unused bytes are zeroed.
+ */
+static inline void
+aead_chacha20_poly1305_aad_fill(struct aead_chacha20_poly1305_aad *aad,
+		rte_be32_t spi, rte_be64_t sqn,
+		int esn)
+{
+	aad->spi = spi;
+	if (esn)
+		aad->sqn.u64 = sqn;
+	else {
+		aad->sqn.u32[0] = sqn_low32(sqn);
+		aad->sqn.u32[1] = 0;
+	}
+	aad->align0 = 0;
+}
+
 /*
  * Helper routine to copy IV
  * Right now we support only algorithms with IV length equals 0/8/16 bytes.
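
Usage sketch (an editor's illustration, not part of the patch): how the new
ChaCha20-Poly1305 helpers are meant to be chained when preparing a single
outbound packet. The example_* function and its sa_salt/sa_spi/pkt_sqn/esn
parameters are hypothetical names; gen_iv(), the *_fill() helpers and
IPSEC_MAX_IV_QWORD come from this header.

#include <rte_byteorder.h>
#include "crypto.h"

static void
example_chacha20_poly1305_prep(struct aead_chacha20_poly1305_iv *iv_out,
	struct aead_chacha20_poly1305_aad *aad_out,
	uint32_t sa_salt, rte_be32_t sa_spi, uint64_t pkt_sqn, int esn)
{
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	rte_be64_t wsqn = rte_cpu_to_be_64(pkt_sqn);

	/* derive the per-packet IV from the (network byte order) sqn */
	gen_iv(iv, wsqn);
	/* salt + IV + counter, in the layout the device expects */
	aead_chacha20_poly1305_iv_fill(iv_out, iv[0], sa_salt);
	/* spi + 32-bit sqn or 64-bit ESN, zeroing the unused bytes */
	aead_chacha20_poly1305_aad_fill(aad_out, sa_spi, wsqn, esn);
}

The AES-CCM path is analogous (aead_ccm_iv_fill()/aead_ccm_aad_fill()); the
one twist is that the 32-bit salt handed to aead_ccm_iv_fill() also carries
the CCM flags byte, which the helper splits out through union aead_ccm_salt.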