diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 60797e9c0f..f29c98051f 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 
 #ifndef _RTE_CRYPTO_SYM_H_
@@ -25,6 +25,67 @@ extern "C" {
 #include <rte_mempool.h>
 #include <rte_common.h>
 
+/**
+ * Crypto IO Vector (in analogy with struct iovec)
+ * Supposed to be used to pass input/output data buffers for crypto data-path
+ * functions.
+ */
+struct rte_crypto_vec {
+	/** virtual address of the data buffer */
+	void *base;
+	/** IOVA of the data buffer */
+	rte_iova_t iova;
+	/** length of the data buffer */
+	uint32_t len;
+};
+
+/**
+ * Crypto scatter-gather list descriptor. Consists of a pointer to an array
+ * of Crypto IO vectors and its size.
+ */
+struct rte_crypto_sgl {
+	/** start of an array of vectors */
+	struct rte_crypto_vec *vec;
+	/** size of an array of vectors */
+	uint32_t num;
+};
+
+/**
+ * Synchronous operation descriptor.
+ * Supposed to be used with CPU crypto API calls.
+ */
+struct rte_crypto_sym_vec {
+	/** array of SGL vectors */
+	struct rte_crypto_sgl *sgl;
+	/** array of pointers to IV */
+	void **iv;
+	/** array of pointers to AAD */
+	void **aad;
+	/** array of pointers to digest */
+	void **digest;
+	/**
+	 * array of statuses for each operation:
+	 *  - 0 on success
+	 *  - errno on error
+	 */
+	int32_t *status;
+	/** number of operations to perform */
+	uint32_t num;
+};
+
+/**
+ * Used by cpu_crypto_process_bulk() to specify head/tail offsets
+ * for auth/cipher processing.
+ */
+union rte_crypto_sym_ofs {
+	uint64_t raw;
+	struct {
+		struct {
+			uint16_t head;
+			uint16_t tail;
+		} auth, cipher;
+	} ofs;
+};
 
 /** Symmetric Cipher Algorithms */
 enum rte_crypto_cipher_algorithm {
@@ -114,8 +175,8 @@ struct rte_crypto_cipher_xform {
 	/**< Cipher algorithm */
 
 	struct {
-		uint8_t *data;	/**< pointer to key data */
-		uint16_t length;/**< key length in bytes */
+		const uint8_t *data;	/**< pointer to key data */
+		uint16_t length;	/**< key length in bytes */
 	} key;
 	/**< Cipher key
 	 *
@@ -124,11 +185,6 @@ struct rte_crypto_cipher_xform {
 	 * keymask. As per RFC3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
 	 *
-	 * For AES-XTS mode of operation, two keys must be provided and
-	 * key.data must point to the two keys concatenated together (Key1 ||
-	 * Key2). The cipher key length will contain the total size of both
-	 * keys.
-	 *
 	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
 	 * 192 bits (24 bytes) or 256 bits (32 bytes).
 	 *
@@ -140,6 +196,8 @@ struct rte_crypto_cipher_xform {
 	 * For the AES-XTS mode of operation:
 	 *  - Two keys must be provided and key.length refers to total length of
 	 *    the two keys.
+	 *  - key.data must point to the two keys concatenated together
+	 *    (key1 || key2).
 	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
 	 *  - Both keys must have the same size.
 	 **/
@@ -155,11 +213,6 @@ struct rte_crypto_cipher_xform {
 	 *
 	 * - For block ciphers in CTR mode, this is the counter.
 	 *
-	 * - For GCM mode, this is either the IV (if the length
-	 *   is 96 bits) or J0 (for other sizes), where J0 is as
-	 *   defined by NIST SP800-38D. Regardless of the IV
-	 *   length, a full 16 bytes needs to be allocated.
-	 *
 	 * - For CCM mode, the first byte is reserved, and the
 	 *   nonce should be written starting at &iv[1] (to allow
 	 *   space for the implementation to write in the flags
@@ -187,9 +240,6 @@ struct rte_crypto_cipher_xform {
 	 *   of the counter (which must be the same as the block
 	 *   length of the cipher).
 	 *
-	 * - For GCM mode, this is either 12 (for 96-bit IVs)
-	 *   or 16, in which case data points to J0.
-	 *
 	 * - For CCM mode, this is the length of the nonce,
 	 *   which can be in the range 7 to 13 inclusive.
 	 */
@@ -219,9 +269,12 @@ enum rte_crypto_auth_algorithm {
 	/**< HMAC using MD5 algorithm */
 
 	RTE_CRYPTO_AUTH_SHA1,
-	/**< 128 bit SHA algorithm. */
+	/**< 160 bit SHA algorithm. */
 	RTE_CRYPTO_AUTH_SHA1_HMAC,
-	/**< HMAC using 128 bit SHA algorithm. */
+	/**< HMAC using 160 bit SHA algorithm.
+	 * HMAC-SHA-1-96 can be generated by setting
+	 * digest_length to 12 bytes in auth/aead xforms.
+	 */
 	RTE_CRYPTO_AUTH_SHA224,
 	/**< 224 bit SHA algorithm. */
 	RTE_CRYPTO_AUTH_SHA224_HMAC,
@@ -245,6 +298,23 @@ enum rte_crypto_auth_algorithm {
 	RTE_CRYPTO_AUTH_ZUC_EIA3,
 	/**< ZUC algorithm in EIA3 mode */
 
+	RTE_CRYPTO_AUTH_SHA3_224,
+	/**< 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	/**< HMAC using 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256,
+	/**< 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	/**< HMAC using 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384,
+	/**< 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	/**< HMAC using 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512,
+	/**< 512 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+	/**< HMAC using 512 bit SHA3 algorithm. */
+
 	RTE_CRYPTO_AUTH_LIST_END
 };
 
@@ -276,8 +346,8 @@ struct rte_crypto_auth_xform {
 	/**< Authentication algorithm selection */
 
 	struct {
-		uint8_t *data;	/**< pointer to key data */
-		uint16_t length;/**< key length in bytes */
+		const uint8_t *data;	/**< pointer to key data */
+		uint16_t length;	/**< key length in bytes */
 	} key;
 	/**< Authentication key data.
 	 * The authentication key length MUST be less than or equal to the
@@ -292,9 +362,10 @@ struct rte_crypto_auth_xform {
 	 *   specified as number of bytes from start of crypto
 	 *   operation (rte_crypto_op).
 	 *
-	 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
-	 *   for AES-GMAC, this is the authentication
-	 *   Initialisation Vector (IV) value.
+	 * - For SNOW 3G in UIA2 mode and for ZUC in EIA3 mode,
+	 *   this is the authentication Initialisation Vector
+	 *   (IV) value. For the AES-GMAC IV description, please
+	 *   refer to the `length` field of the iv struct.
 	 *
 	 * - For KASUMI in F9 mode and other authentication
 	 *   algorithms, this field is not used.
@@ -311,6 +382,14 @@ struct rte_crypto_auth_xform {
 	 * - For KASUMI in F9 mode and other authentication
 	 *   algorithms, this field is not used.
 	 *
+	 * - For GMAC mode, this is either:
+	 * 1) A number greater than or equal to one, which means
+	 * the IV is used and J0 will be computed internally;
+	 * a minimum of 16 bytes must be allocated.
+	 * 2) Zero, in which case data points to J0. In this case,
+	 * 16 bytes of J0 should be passed, where J0 is as defined
+	 * by NIST SP800-38D.
+	 *
 	 */
 	} iv;	/**< Initialisation vector parameters */
 
@@ -333,6 +412,8 @@ enum rte_crypto_aead_algorithm {
 	/**< AES algorithm in CCM mode. */
 	RTE_CRYPTO_AEAD_AES_GCM,
 	/**< AES algorithm in GCM mode. */
+	RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+	/**< Chacha20 cipher with poly1305 authenticator */
 
 	RTE_CRYPTO_AEAD_LIST_END
 };
 
@@ -359,8 +440,8 @@ struct rte_crypto_aead_xform {
 	/**< AEAD algorithm selection */
 
 	struct {
-		uint8_t *data;	/**< pointer to key data */
-		uint16_t length;/**< key length in bytes */
+		const uint8_t *data;	/**< pointer to key data */
+		uint16_t length;	/**< key length in bytes */
 	} key;
 
 	struct {
@@ -369,11 +450,6 @@ struct rte_crypto_aead_xform {
 	 *   specified as number of bytes from start of crypto
 	 *   operation (rte_crypto_op).
 	 *
-	 * - For GCM mode, this is either the IV (if the length
-	 *   is 96 bits) or J0 (for other sizes), where J0 is as
-	 *   defined by NIST SP800-38D. Regardless of the IV
-	 *   length, a full 16 bytes needs to be allocated.
-	 *
 	 * - For CCM mode, the first byte is reserved, and the
 	 *   nonce should be written starting at &iv[1] (to allow
 	 *   space for the implementation to write in the flags
@@ -381,17 +457,29 @@ struct rte_crypto_aead_xform {
 	 *   be allocated, even though the length field will
 	 *   have a value less than this.
 	 *
+	 * - For Chacha20-Poly1305 it is a 96-bit nonce.
+	 *   The PMD sets the initial counter for Poly1305 key
+	 *   generation to 0 and for Chacha20 encryption to 1,
+	 *   as per RFC 8439 section 2.8 (AEAD construction).
+	 *
 	 * For optimum performance, the data pointed to SHOULD
 	 * be 8-byte aligned.
 	 */
 	uint16_t length;
 	/**< Length of valid IV data.
 	 *
-	 * - For GCM mode, this is either 12 (for 96-bit IVs)
-	 *   or 16, in which case data points to J0.
+	 * - For GCM mode, this is either:
+	 * 1) A number greater than or equal to one, which means
+	 * the IV is used and J0 will be computed internally;
+	 * a minimum of 16 bytes must be allocated.
+	 * 2) Zero, in which case data points to J0. In this case,
+	 * 16 bytes of J0 should be passed, where J0 is as defined
+	 * by NIST SP800-38D.
 	 *
 	 * - For CCM mode, this is the length of the nonce,
 	 *   which can be in the range 7 to 13 inclusive.
+	 *
+	 * - For Chacha20-Poly1305, this field is always 12.
 	 */
 	} iv;	/**< Initialisation vector parameters */
 
@@ -575,7 +663,9 @@ struct rte_crypto_sym_op {
 			 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
 			 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
 			 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
-			 * this field should be in bits.
+			 * this field should be in bits. For
+			 * digest-encrypted cases this must be
+			 * an 8-bit multiple.
 			 */
 			uint32_t length;
			 /**< The message length, in bytes, of the
@@ -589,7 +679,9 @@ struct rte_crypto_sym_op {
 			 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
 			 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
 			 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
-			 * this field should be in bits.
+			 * this field should be in bits. For
+			 * digest-encrypted cases this must be
+			 * an 8-bit multiple.
 			 */
 		} data; /**< Data offsets and length for ciphering */
 	} cipher;
@@ -605,12 +697,22 @@ struct rte_crypto_sym_op {
 			 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
 			 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
 			 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
-			 * this field should be in bits.
+			 * this field should be in bits. For
+			 * digest-encrypted cases this must be
+			 * an 8-bit multiple.
 			 *
 			 * @note
 			 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
 			 * this offset should be such that
 			 * data to authenticate starts at COUNT.
+			 *
+			 * @note
+			 * For DOCSIS security protocol, this
+			 * offset is the DOCSIS header length
+			 * and, therefore, also the CRC offset,
+			 * i.e. the number of bytes into the
+			 * packet at which CRC calculation
+			 * should begin.
 			 */
 			uint32_t length;
			 /**< The message length, in bytes, of the source
@@ -620,13 +722,21 @@ struct rte_crypto_sym_op {
 			 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
 			 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
 			 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
-			 * this field should be in bits.
+			 * this field should be in bits. For
+			 * digest-encrypted cases this must be
+			 * an 8-bit multiple.
 			 *
 			 * @note
 			 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
 			 * the length should include the COUNT,
 			 * FRESH, message, direction bit and padding
 			 * (to be multiple of 8 bits).
+			 *
+			 * @note
+			 * For DOCSIS security protocol, this
+			 * is the CRC length, i.e. the number of
+			 * bytes in the packet over which the
+			 * CRC should be calculated.
 			 */
 		} data;
 		/**< Data offsets and length for authentication */
@@ -651,6 +761,57 @@ struct rte_crypto_sym_op {
 			 * For digest generation, the digest result
 			 * will overwrite any data at this location.
 			 *
+			 * @note
+			 * Digest-encrypted case.
+			 * Digest can be generated, appended to
+			 * the end of raw data and encrypted
+			 * together using chained digest
+			 * generation
+			 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
+			 * and encryption
+			 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+			 * xforms. Similarly, authentication
+			 * of the raw data against the appended,
+			 * decrypted digest can be performed
+			 * using decryption
+			 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
+			 * and digest verification
+			 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
+			 * chained xforms.
+			 * To perform those operations, a few
+			 * additional conditions must be met:
+			 * - the caller must allocate at least
+			 * digest_length of memory at the end of
+			 * source and (in case of out-of-place
+			 * operations) destination buffer; those
+			 * buffers can be linear or split using
+			 * scatter-gather lists,
+			 * - the digest data pointer must point to
+			 * the end of source or (in case of
+			 * out-of-place operations) destination
+			 * data, which is the pointer to the
+			 * data buffer + auth.data.offset +
+			 * auth.data.length,
+			 * - cipher.data.offset +
+			 * cipher.data.length must be greater
+			 * than auth.data.offset +
+			 * auth.data.length and is typically
+			 * equal to auth.data.offset +
+			 * auth.data.length + digest_length.
+			 * - for wireless algorithms, i.e.
+			 * SNOW 3G, KASUMI and ZUC, as the
+			 * cipher.data.length,
+			 * cipher.data.offset,
+			 * auth.data.length and
+			 * auth.data.offset are in bits, they
+			 * must be 8-bit multiples.
+			 *
+			 * Note that, for security reasons, it
+			 * is the PMD's responsibility not to
+			 * leave an unencrypted digest in any
+			 * buffer after performing auth-cipher
+			 * operations.
+			 *
 			 */
 			rte_iova_t phys_addr;
 			/**< Physical address of digest */
@@ -715,6 +876,75 @@ __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
 	return 0;
 }
 
+/**
+ * Converts a portion of mbuf data into a vector representation.
+ * Each segment will be represented as a separate entry in the *vec* array.
+ * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
+ * @param mb
+ *   Pointer to the *rte_mbuf* object.
+ * @param ofs
+ *   Offset within mbuf data to start with.
+ * @param len
+ *   Length of data to represent.
+ * @param vec
+ *   Pointer to an output array of IO vectors.
+ * @param num
+ *   Size of the output array.
+ * @return
+ *   - on success, the number of entries filled in the *vec* array.
+ *   - on failure, the negative of the number of *vec* entries required.
+ */ +__rte_experimental +static inline int +rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len, + struct rte_crypto_vec vec[], uint32_t num) +{ + uint32_t i; + struct rte_mbuf *nseg; + uint32_t left; + uint32_t seglen; + + /* assuming that requested data starts in the first segment */ + RTE_ASSERT(mb->data_len > ofs); + + if (mb->nb_segs > num) + return -mb->nb_segs; + + vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs); + vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs); + + /* whole data lies in the first segment */ + seglen = mb->data_len - ofs; + if (len <= seglen) { + vec[0].len = len; + return 1; + } + + /* data spread across segments */ + vec[0].len = seglen; + left = len - seglen; + for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) { + + vec[i].base = rte_pktmbuf_mtod(nseg, void *); + vec[i].iova = rte_pktmbuf_iova(nseg); + + seglen = nseg->data_len; + if (left <= seglen) { + /* whole requested data is completed */ + vec[i].len = left; + left = 0; + break; + } + + /* use whole segment */ + vec[i].len = seglen; + left -= seglen; + } + + RTE_ASSERT(left == 0); + return i + 1; +} + #ifdef __cplusplus }