X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_cryptodev%2Frte_crypto_sym.h;h=5f859ec382cc902530460b860f9e5915cd0831d8;hb=36572831720450f3657480de36721fee7c65f92d;hp=c78258813fafe7b6c3c74768f39b480566a435ee;hpb=38227c0e3ad2922aa98ecb784296e3e97604506c;p=dpdk.git diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h index c78258813f..5f859ec382 100644 --- a/lib/librte_cryptodev/rte_crypto_sym.h +++ b/lib/librte_cryptodev/rte_crypto_sym.h @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2016 Intel Corporation. All rights reserved. + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -68,28 +68,12 @@ enum rte_crypto_cipher_algorithm { RTE_CRYPTO_CIPHER_AES_CBC, /**< AES algorithm in CBC mode */ - RTE_CRYPTO_CIPHER_AES_CCM, - /**< AES algorithm in CCM mode. When this cipher algorithm is used the - * *RTE_CRYPTO_AUTH_AES_CCM* element of the - * *rte_crypto_hash_algorithm* enum MUST be used to set up the related - * *rte_crypto_auth_xform* structure in the session context or in - * the op_params of the crypto operation structure in the case of a - * session-less crypto operation - */ RTE_CRYPTO_CIPHER_AES_CTR, /**< AES algorithm in Counter mode */ RTE_CRYPTO_CIPHER_AES_ECB, /**< AES algorithm in ECB mode */ RTE_CRYPTO_CIPHER_AES_F8, /**< AES algorithm in F8 mode */ - RTE_CRYPTO_CIPHER_AES_GCM, - /**< AES algorithm in GCM mode. When this cipher algorithm is used the - * *RTE_CRYPTO_AUTH_AES_GCM* or *RTE_CRYPTO_AUTH_AES_GMAC* element - * of the *rte_crypto_auth_algorithm* enum MUST be used to set up - * the related *rte_crypto_auth_setup_data* structure in the session - * context or in the op_params of the crypto operation structure - * in the case of a session-less crypto operation. - */ RTE_CRYPTO_CIPHER_AES_XTS, /**< AES algorithm in XTS mode */ @@ -108,6 +92,20 @@ enum rte_crypto_cipher_algorithm { RTE_CRYPTO_CIPHER_DES_CBC, /**< DES algorithm in CBC mode */ + RTE_CRYPTO_CIPHER_AES_DOCSISBPI, + /**< AES algorithm using modes required by + * DOCSIS Baseline Privacy Plus Spec. + * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next + * for m_src and m_dst in the rte_crypto_sym_op must be NULL. + */ + + RTE_CRYPTO_CIPHER_DES_DOCSISBPI, + /**< DES algorithm using modes required by + * DOCSIS Baseline Privacy Plus Spec. + * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next + * for m_src and m_dst in the rte_crypto_sym_op must be NULL. + */ + RTE_CRYPTO_CIPHER_LIST_END }; @@ -145,7 +143,7 @@ struct rte_crypto_cipher_xform { struct { uint8_t *data; /**< pointer to key data */ - size_t length; /**< key length in bytes */ + uint16_t length;/**< key length in bytes */ } key; /**< Cipher key * @@ -176,6 +174,57 @@ struct rte_crypto_cipher_xform { * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes). * - Both keys must have the same size. **/ + struct { + uint16_t offset; + /**< Starting point for Initialisation Vector or Counter, + * specified as number of bytes from start of crypto + * operation (rte_crypto_op). + * + * - For block ciphers in CBC or F8 mode, or for KASUMI + * in F8 mode, or for SNOW 3G in UEA2 mode, this is the + * Initialisation Vector (IV) value. + * + * - For block ciphers in CTR mode, this is the counter. 
+ * + * - For GCM mode, this is either the IV (if the length + * is 96 bits) or J0 (for other sizes), where J0 is as + * defined by NIST SP800-38D. Regardless of the IV + * length, a full 16 bytes needs to be allocated. + * + * - For CCM mode, the first byte is reserved, and the + * nonce should be written starting at &iv[1] (to allow + * space for the implementation to write in the flags + * in the first byte). Note that a full 16 bytes should + * be allocated, even though the length field will + * have a value less than this. Note that the PMDs may + * modify the memory reserved (the first byte and the + * final padding) + * + * - For AES-XTS, this is the 128bit tweak, i, from + * IEEE Std 1619-2007. + * + * For optimum performance, the data pointed to SHOULD + * be 8-byte aligned. + */ + uint16_t length; + /**< Length of valid IV data. + * + * - For block ciphers in CBC or F8 mode, or for KASUMI + * in F8 mode, or for SNOW 3G in UEA2 mode, this is the + * length of the IV (which must be the same as the + * block length of the cipher). + * + * - For block ciphers in CTR mode, this is the length + * of the counter (which must be the same as the block + * length of the cipher). + * + * - For GCM mode, this is either 12 (for 96-bit IVs) + * or 16, in which case data points to J0. + * + * - For CCM mode, this is the length of the nonce, + * which can be in the range 7 to 13 inclusive. + */ + } iv; /**< Initialisation vector parameters */ }; /** Symmetric Authentication / Hash Algorithms */ @@ -185,33 +234,10 @@ enum rte_crypto_auth_algorithm { RTE_CRYPTO_AUTH_AES_CBC_MAC, /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */ - RTE_CRYPTO_AUTH_AES_CCM, - /**< AES algorithm in CCM mode. This is an authenticated cipher. When - * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM* - * element of the *rte_crypto_cipher_algorithm* enum MUST be used to - * set up the related rte_crypto_cipher_setup_data structure in the - * session context or the corresponding parameter in the crypto - * operation data structures op_params parameter MUST be set for a - * session-less crypto operation. - */ RTE_CRYPTO_AUTH_AES_CMAC, /**< AES CMAC algorithm. */ - RTE_CRYPTO_AUTH_AES_GCM, - /**< AES algorithm in GCM mode. When this hash algorithm - * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the - * rte_crypto_cipher_algorithm enum MUST be used to set up the related - * rte_crypto_cipher_setup_data structure in the session context, or - * the corresponding parameter in the crypto operation data structures - * op_params parameter MUST be set for a session-less crypto operation. - */ RTE_CRYPTO_AUTH_AES_GMAC, - /**< AES GMAC algorithm. When this hash algorithm - * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the - * rte_crypto_cipher_algorithm enum MUST be used to set up the related - * rte_crypto_cipher_setup_data structure in the session context, or - * the corresponding parameter in the crypto operation data structures - * op_params parameter MUST be set for a session-less crypto operation. - */ + /**< AES GMAC algorithm. */ RTE_CRYPTO_AUTH_AES_XCBC_MAC, /**< AES XCBC algorithm. */ @@ -282,7 +308,7 @@ struct rte_crypto_auth_xform { struct { uint8_t *data; /**< pointer to key data */ - size_t length; /**< key length in bytes */ + uint16_t length;/**< key length in bytes */ } key; /**< Authentication key data. * The authentication key length MUST be less than or equal to the @@ -291,53 +317,127 @@ struct rte_crypto_auth_xform { * (for example RFC 2104, FIPS 198a). 
*/ - uint32_t digest_length; + struct { + uint16_t offset; + /**< Starting point for Initialisation Vector or Counter, + * specified as number of bytes from start of crypto + * operation (rte_crypto_op). + * + * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and + * for AES-GMAC, this is the authentication + * Initialisation Vector (IV) value. + * + * - For KASUMI in F9 mode and other authentication + * algorithms, this field is not used. + * + * For optimum performance, the data pointed to SHOULD + * be 8-byte aligned. + */ + uint16_t length; + /**< Length of valid IV data. + * + * - For SNOW3G in UIA2 mode, for ZUC in EIA3 mode and + * for AES-GMAC, this is the length of the IV. + * + * - For KASUMI in F9 mode and other authentication + * algorithms, this field is not used. + * + */ + } iv; /**< Initialisation vector parameters */ + + uint16_t digest_length; /**< Length of the digest to be returned. If the verify option is set, * this specifies the length of the digest to be compared for the * session. * + * It is the caller's responsibility to ensure that the + * digest length is compliant with the hash algorithm being used. * If the value is less than the maximum length allowed by the hash, - * the result shall be truncated. If the value is greater than the - * maximum length allowed by the hash then an error will be generated - * by *rte_cryptodev_sym_session_create* or by the - * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs. + * the result shall be truncated. */ +}; - uint32_t add_auth_data_length; - /**< The length of the additional authenticated data (AAD) in bytes. - * The maximum permitted value is 240 bytes, unless otherwise specified - * below. - * - * This field must be specified when the hash algorithm is one of the - * following: - * - * - For SNOW 3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the - * length of the IV (which should be 16). - * - * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is - * the length of the Additional Authenticated Data (called A, in NIST - * SP800-38D). - * - * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is - * the length of the associated data (called A, in NIST SP800-38C). - * Note that this does NOT include the length of any padding, or the - * 18 bytes reserved at the start of the above field to store the - * block B0 and the encoded length. The maximum permitted value in - * this case is 222 bytes. - * - * @note - * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation - * this field is not used and should be set to 0. Instead the length - * of the AAD data is specified in additional authentication data - * length field of the rte_crypto_sym_op_data structure - */ + +/** Symmetric AEAD Algorithms */ +enum rte_crypto_aead_algorithm { + RTE_CRYPTO_AEAD_AES_CCM = 1, + /**< AES algorithm in CCM mode. */ + RTE_CRYPTO_AEAD_AES_GCM, + /**< AES algorithm in GCM mode. 
*/ + RTE_CRYPTO_AEAD_LIST_END +}; + +/** AEAD algorithm name strings */ +extern const char * +rte_crypto_aead_algorithm_strings[]; + +/** Symmetric AEAD Operations */ +enum rte_crypto_aead_operation { + RTE_CRYPTO_AEAD_OP_ENCRYPT, + /**< Encrypt and generate digest */ + RTE_CRYPTO_AEAD_OP_DECRYPT + /**< Verify digest and decrypt */ +}; + +/** Authentication operation name strings */ +extern const char * +rte_crypto_aead_operation_strings[]; + +struct rte_crypto_aead_xform { + enum rte_crypto_aead_operation op; + /**< AEAD operation type */ + enum rte_crypto_aead_algorithm algo; + /**< AEAD algorithm selection */ + + struct { + uint8_t *data; /**< pointer to key data */ + uint16_t length;/**< key length in bytes */ + } key; + + struct { + uint16_t offset; + /**< Starting point for Initialisation Vector or Counter, + * specified as number of bytes from start of crypto + * operation (rte_crypto_op). + * + * - For GCM mode, this is either the IV (if the length + * is 96 bits) or J0 (for other sizes), where J0 is as + * defined by NIST SP800-38D. Regardless of the IV + * length, a full 16 bytes needs to be allocated. + * + * - For CCM mode, the first byte is reserved, and the + * nonce should be written starting at &iv[1] (to allow + * space for the implementation to write in the flags + * in the first byte). Note that a full 16 bytes should + * be allocated, even though the length field will + * have a value less than this. + * + * For optimum performance, the data pointed to SHOULD + * be 8-byte aligned. + */ + uint16_t length; + /**< Length of valid IV data. + * + * - For GCM mode, this is either 12 (for 96-bit IVs) + * or 16, in which case data points to J0. + * + * - For CCM mode, this is the length of the nonce, + * which can be in the range 7 to 13 inclusive. + */ + } iv; /**< Initialisation vector parameters */ + + uint16_t digest_length; + + uint16_t aad_length; + /**< The length of the additional authenticated data (AAD) in bytes. */ }; /** Crypto transformation types */ enum rte_crypto_sym_xform_type { RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */ RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */ - RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */ + RTE_CRYPTO_SYM_XFORM_CIPHER, /**< Cipher xform */ + RTE_CRYPTO_SYM_XFORM_AEAD /**< AEAD xform */ }; /** @@ -360,20 +460,11 @@ struct rte_crypto_sym_xform { /**< Authentication / hash xform */ struct rte_crypto_cipher_xform cipher; /**< Cipher xform */ + struct rte_crypto_aead_xform aead; + /**< AEAD xform */ }; }; -/** - * Crypto operation session type. This is used to specify whether a crypto - * operation has session structure attached for immutable parameters or if all - * operation information is included in the operation data structure. - */ -enum rte_crypto_sym_op_sess_type { - RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */ - RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */ -}; - - struct rte_cryptodev_sym_session; /** @@ -410,8 +501,6 @@ struct rte_crypto_sym_op { struct rte_mbuf *m_src; /**< source mbuf */ struct rte_mbuf *m_dst; /**< destination mbuf */ - enum rte_crypto_sym_op_sess_type sess_type; - RTE_STD_C11 union { struct rte_cryptodev_sym_session *session; @@ -420,222 +509,181 @@ struct rte_crypto_sym_op { /**< Session-less API crypto operation parameters */ }; - struct { - struct { - uint32_t offset; - /**< Starting point for cipher processing, specified - * as number of bytes from start of data in the source - * buffer. 
The result of the cipher operation will be - * written back into the output buffer starting at - * this location. - * - * @note - * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2, - * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8 - * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3, - * this field should be in bits. - */ - - uint32_t length; - /**< The message length, in bytes, of the source buffer - * on which the cryptographic operation will be - * computed. This must be a multiple of the block size - * if a block cipher is being used. This is also the - * same as the result length. - * - * @note - * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM, - * this value should not include the length of the - * padding or the length of the MAC; the driver will - * compute the actual number of bytes over which the - * encryption will occur, which will include these - * values. - * - * @note - * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this - * field should be set to 0. - * - * @note - * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2, - * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8 - * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3, - * this field should be in bits. - */ - } data; /**< Data offsets and length for ciphering */ - - struct { - uint8_t *data; - /**< Initialisation Vector or Counter. - * - * - For block ciphers in CBC or F8 mode, or for KASUMI - * in F8 mode, or for SNOW 3G in UEA2 mode, this is the - * Initialisation Vector (IV) value. - * - * - For block ciphers in CTR mode, this is the counter. - * - * - For GCM mode, this is either the IV (if the length - * is 96 bits) or J0 (for other sizes), where J0 is as - * defined by NIST SP800-38D. Regardless of the IV - * length, a full 16 bytes needs to be allocated. - * - * - For CCM mode, the first byte is reserved, and the - * nonce should be written starting at &iv[1] (to allow - * space for the implementation to write in the flags - * in the first byte). Note that a full 16 bytes should - * be allocated, even though the length field will - * have a value less than this. - * - * - For AES-XTS, this is the 128bit tweak, i, from - * IEEE Std 1619-2007. - * - * For optimum performance, the data pointed to SHOULD - * be 8-byte aligned. - */ - phys_addr_t phys_addr; - uint16_t length; - /**< Length of valid IV data. - * - * - For block ciphers in CBC or F8 mode, or for KASUMI - * in F8 mode, or for SNOW 3G in UEA2 mode, this is the - * length of the IV (which must be the same as the - * block length of the cipher). - * - * - For block ciphers in CTR mode, this is the length - * of the counter (which must be the same as the block - * length of the cipher). - * - * - For GCM mode, this is either 12 (for 96-bit IVs) - * or 16, in which case data points to J0. - * - * - For CCM mode, this is the length of the nonce, - * which can be in the range 7 to 13 inclusive. - */ - } iv; /**< Initialisation vector parameters */ - } cipher; - - struct { - struct { - uint32_t offset; - /**< Starting point for hash processing, specified as - * number of bytes from start of packet in source - * buffer. - * - * @note - * For CCM and GCM modes of operation, this field is - * ignored. The field @ref aad field - * should be set instead. - * - * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) - * mode of operation, this field is set to 0. aad data - * pointer of rte_crypto_sym_op_data structure is - * used instead - * - * @note - * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2, - * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9 - * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3, - * this field should be in bits. 
- */ - - uint32_t length; - /**< The message length, in bytes, of the source - * buffer that the hash will be computed on. - * - * @note - * For CCM and GCM modes of operation, this field is - * ignored. The field @ref aad field should be set - * instead. - * - * @note - * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode - * of operation, this field is set to 0. - * Auth.aad.length is used instead. - * - * @note - * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2, - * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9 - * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3, - * this field should be in bits. - */ - } data; /**< Data offsets and length for authentication */ - + RTE_STD_C11 + union { struct { - uint8_t *data; - /**< This points to the location where the digest result - * should be inserted (in the case of digest generation) - * or where the purported digest exists (in the case of - * digest verification). - * - * At session creation time, the client specified the - * digest result length with the digest_length member - * of the @ref rte_crypto_auth_xform structure. For - * physical crypto devices the caller must allocate at - * least digest_length of physically contiguous memory - * at this location. - * - * For digest generation, the digest result will - * overwrite any data at this location. - * - * @note - * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for - * "digest result" read "authentication tag T". - */ - phys_addr_t phys_addr; - /**< Physical address of digest */ - uint16_t length; - /**< Length of digest */ - } digest; /**< Digest parameters */ + struct { + uint32_t offset; + /**< Starting point for AEAD processing, specified as + * number of bytes from start of packet in source + * buffer. + */ + uint32_t length; + /**< The message length, in bytes, of the source buffer + * on which the cryptographic operation will be + * computed. This must be a multiple of the block size + */ + } data; /**< Data offsets and length for AEAD */ + struct { + uint8_t *data; + /**< This points to the location where the digest result + * should be inserted (in the case of digest generation) + * or where the purported digest exists (in the case of + * digest verification). + * + * At session creation time, the client specified the + * digest result length with the digest_length member + * of the @ref rte_crypto_auth_xform structure. For + * physical crypto devices the caller must allocate at + * least digest_length of physically contiguous memory + * at this location. + * + * For digest generation, the digest result will + * overwrite any data at this location. + * + * @note + * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for + * "digest result" read "authentication tag T". + */ + phys_addr_t phys_addr; + /**< Physical address of digest */ + } digest; /**< Digest parameters */ + struct { + uint8_t *data; + /**< Pointer to Additional Authenticated Data (AAD) + * needed for authenticated cipher mechanisms (CCM and + * GCM) + * + * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM), + * the caller should setup this field as follows: + * + * - the additional authentication data itself should + * be written starting at an offset of 18 bytes into + * the array, leaving room for the first block (16 bytes) + * and the length encoding in the first two bytes of the + * second block. + * + * - the array should be big enough to hold the above + * fields, plus any padding to round this up to the + * nearest multiple of the block size (16 bytes). + * Padding will be added by the implementation. 
+ * + * - Note that PMDs may modify the memory reserved + * (first 18 bytes and the final padding). + * + * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the + * caller should setup this field as follows: + * + * - the AAD is written in starting at byte 0 + * - the array must be big enough to hold the AAD, plus + * any space to round this up to the nearest multiple + * of the block size (16 bytes). + * + */ + phys_addr_t phys_addr; /**< physical address */ + } aad; + /**< Additional authentication parameters */ + } aead; struct { - uint8_t *data; - /**< Pointer to Additional Authenticated Data (AAD) - * needed for authenticated cipher mechanisms (CCM and - * GCM), and to the IV for SNOW 3G authentication - * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other - * authentication mechanisms this pointer is ignored. - * - * The length of the data pointed to by this field is - * set up for the session in the @ref - * rte_crypto_auth_xform structure as part of the @ref - * rte_cryptodev_sym_session_create function call. - * This length must not exceed 240 bytes. - * - * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), - * the caller should setup this field as follows: - * - * - the nonce should be written starting at an offset - * of one byte into the array, leaving room for the - * implementation to write in the flags to the first - * byte. - * - * - the additional authentication data itself should - * be written starting at an offset of 18 bytes into - * the array, leaving room for the length encoding in - * the first two bytes of the second block. - * - * - the array should be big enough to hold the above - * fields, plus any padding to round this up to the - * nearest multiple of the block size (16 bytes). - * Padding will be added by the implementation. - * - * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the - * caller should setup this field as follows: - * - * - the AAD is written in starting at byte 0 - * - the array must be big enough to hold the AAD, plus - * any space to round this up to the nearest multiple - * of the block size (16 bytes). - * - * @note - * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of - * operation, this field is used to pass plaintext. - */ - phys_addr_t phys_addr; /**< physical address */ - uint16_t length; /**< Length of digest */ - } aad; - /**< Additional authentication parameters */ - } auth; -} __rte_cache_aligned; + struct { + struct { + uint32_t offset; + /**< Starting point for cipher processing, + * specified as number of bytes from start + * of data in the source buffer. + * The result of the cipher operation will be + * written back into the output buffer + * starting at this location. + * + * @note + * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2, + * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8 + * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3, + * this field should be in bits. + */ + uint32_t length; + /**< The message length, in bytes, of the + * source buffer on which the cryptographic + * operation will be computed. + * This must be a multiple of the block size + * if a block cipher is being used. This is + * also the same as the result length. + * + * @note + * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2, + * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8 + * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3, + * this field should be in bits. + */ + } data; /**< Data offsets and length for ciphering */ + } cipher; + + struct { + struct { + uint32_t offset; + /**< Starting point for hash processing, + * specified as number of bytes from start of + * packet in source buffer. 
+ * + * @note + * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2, + * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9 + * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3, + * this field should be in bits. + * + * @note + * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9, + * this offset should be such that + * data to authenticate starts at COUNT. + */ + uint32_t length; + /**< The message length, in bytes, of the source + * buffer that the hash will be computed on. + * + * @note + * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2, + * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9 + * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3, + * this field should be in bits. + * + * @note + * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9, + * the length should include the COUNT, + * FRESH, message, direction bit and padding + * (to be multiple of 8 bits). + */ + } data; + /**< Data offsets and length for authentication */ + + struct { + uint8_t *data; + /**< This points to the location where + * the digest result should be inserted + * (in the case of digest generation) + * or where the purported digest exists + * (in the case of digest verification). + * + * At session creation time, the client + * specified the digest result length with + * the digest_length member of the + * @ref rte_crypto_auth_xform structure. + * For physical crypto devices the caller + * must allocate at least digest_length of + * physically contiguous memory at this + * location. + * + * For digest generation, the digest result + * will overwrite any data at this location. + * + */ + phys_addr_t phys_addr; + /**< Physical address of digest */ + } digest; /**< Digest parameters */ + } auth; + }; + }; +}; /** @@ -647,8 +695,6 @@ static inline void __rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op) { memset(op, 0, sizeof(*op)); - - op->sess_type = RTE_CRYPTO_SYM_OP_SESSIONLESS; } @@ -690,7 +736,6 @@ __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op, struct rte_cryptodev_sym_session *sess) { sym_op->session = sess; - sym_op->sess_type = RTE_CRYPTO_SYM_OP_WITH_SESSION; return 0; }
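
The hunks above make two related changes to the symmetric API: all IV parameters move out of rte_crypto_sym_op and into the cipher/auth/AEAD transforms, where the IV is described only by an (offset, length) pair counted from the start of the rte_crypto_op, and AES-CCM/AES-GCM are split out into a dedicated AEAD transform carrying its own digest and AAD lengths. The sketch below shows how an application might fill in the reworked structures for AES-GCM encryption. It is an illustrative outline only, not part of the patch: the IV_OFFSET convention (placing the IV in the op's private area directly after the symmetric op), the 96-bit IV, the helper names and the use of the rte_crypto_op_ctod_offset() macro from rte_crypto.h are assumptions made for the example.

#include <string.h>
#include <rte_crypto.h>

/* Assumed convention: the IV is carried in the crypto op's private data,
 * immediately after the symmetric operation structure. The op mempool must
 * have been created with enough private data to hold it (per the GCM comment
 * above, reserve a full 16 bytes even when iv.length is 12).
 */
#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			sizeof(struct rte_crypto_sym_op))
#define IV_LENGTH	12	/* 96-bit IV for AES-GCM */

/* Fill an AES-GCM AEAD transform using the reworked xform layout. */
static void
setup_gcm_xform(struct rte_crypto_sym_xform *xform,
		uint8_t *key, uint16_t key_len,
		uint16_t digest_len, uint16_t aad_len)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.key.data = key;
	xform->aead.key.length = key_len;	/* now uint16_t, not size_t */
	xform->aead.iv.offset = IV_OFFSET;	/* IV lives inside the op */
	xform->aead.iv.length = IV_LENGTH;
	xform->aead.digest_length = digest_len;
	xform->aead.aad_length = aad_len;
}

/* Per-operation setup: copy the IV into the op at the offset advertised in
 * the transform, then fill the AEAD-specific fields of the symmetric op.
 */
static void
setup_gcm_op(struct rte_crypto_op *op, struct rte_mbuf *m,
	     const uint8_t *iv, uint32_t plaintext_len,
	     uint8_t *digest, phys_addr_t digest_phys,
	     uint8_t *aad, phys_addr_t aad_phys)
{
	struct rte_crypto_sym_op *sym = op->sym;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);

	memcpy(iv_ptr, iv, IV_LENGTH);

	sym->m_src = m;
	sym->aead.data.offset = 0;
	sym->aead.data.length = plaintext_len;
	sym->aead.digest.data = digest;
	sym->aead.digest.phys_addr = digest_phys;
	sym->aead.aad.data = aad;
	sym->aead.aad.phys_addr = aad_phys;
}

Note that, with this layout, no per-packet IV pointer or physical address is programmed into rte_crypto_sym_op any more; the PMD locates the IV solely from the offset recorded in the session's transform, which is why the offset must be identical for every op drawn from the same session.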