/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_

/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>

/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Supposed to be used to pass input/output data buffers for crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
	/** total buffer length */
	uint32_t tot_len;
};

/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors with its size.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};
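
/*
 * Usage sketch (illustrative, not part of the API): describing a flat,
 * physically contiguous buffer with a single-entry SGL. The buffer name is
 * hypothetical, and the IOVA lookup assumes memory registered with DPDK.
 *
 *	uint8_t buf[256];
 *	struct rte_crypto_vec v = {
 *		.base = buf,
 *		.iova = rte_mem_virt2iova(buf),
 *		.len = sizeof(buf),
 *		.tot_len = sizeof(buf),
 *	};
 *	struct rte_crypto_sgl sgl = { .vec = &v, .num = 1 };
 */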

/**
 * Crypto virtual and IOVA address descriptor, used to describe cryptographic
 * data buffer without the length information. The length information is
 * normally predefined during session creation.
 */
struct rte_crypto_va_iova_ptr {
	void *va;
	rte_iova_t iova;
};

/**
 * Raw data operation descriptor.
 * Supposed to be used with synchronous CPU crypto API call or asynchronous
 * RAW data path API call.
 */
struct rte_crypto_sym_vec {
	/** number of operations to perform */
	uint32_t num;
	/** array of SGL vectors */
	struct rte_crypto_sgl *src_sgl;
	/** array of pointers to cipher IV */
	struct rte_crypto_va_iova_ptr *iv;
	/** array of pointers to digest */
	struct rte_crypto_va_iova_ptr *digest;

	__extension__
	union {
		/** array of pointers to auth IV, used for chain operation */
		struct rte_crypto_va_iova_ptr *auth_iv;
		/** array of pointers to AAD, used for AEAD operation */
		struct rte_crypto_va_iova_ptr *aad;
	};

	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error after processing
	 */
	int32_t *status;
};
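
/*
 * Usage sketch (illustrative): a single AEAD operation described for the
 * synchronous CPU crypto or raw data path APIs. The SGL is assumed to be
 * built as in the earlier sketch; iv_buf/tag_buf/aad_buf and their IOVAs
 * are hypothetical, application-prepared buffers.
 *
 *	struct rte_crypto_va_iova_ptr iv = { .va = iv_buf, .iova = iv_iova };
 *	struct rte_crypto_va_iova_ptr tag = { .va = tag_buf, .iova = tag_iova };
 *	struct rte_crypto_va_iova_ptr aad = { .va = aad_buf, .iova = aad_iova };
 *	int32_t status;
 *
 *	struct rte_crypto_sym_vec symvec = {
 *		.num = 1,
 *		.src_sgl = &sgl,
 *		.iv = &iv,
 *		.digest = &tag,
 *		.aad = &aad,
 *		.status = &status,
 *	};
 */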

/**
 * used for cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
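
/*
 * Usage sketch (illustrative): requesting cipher processing that skips a
 * 16-byte header which is still authenticated (cipher region starts 16
 * bytes after the auth region, with no tail trimming).
 *
 *	union rte_crypto_sym_ofs ofs;
 *
 *	ofs.raw = 0;
 *	ofs.ofs.cipher.head = 16;
 *	ofs.ofs.cipher.tail = 0;
 *	ofs.ofs.auth.head = 0;
 *	ofs.ofs.auth.tail = 0;
 */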

/** Symmetric Cipher Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_cipher_algorithm {
	RTE_CRYPTO_CIPHER_NULL = 1,
	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */

	RTE_CRYPTO_CIPHER_3DES_CBC,
	/**< Triple DES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_3DES_CTR,
	/**< Triple DES algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_3DES_ECB,
	/**< Triple DES algorithm in ECB mode */

	RTE_CRYPTO_CIPHER_AES_CBC,
	/**< AES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_AES_CTR,
	/**< AES algorithm in Counter mode */
	RTE_CRYPTO_CIPHER_AES_ECB,
	/**< AES algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_AES_F8,
	/**< AES algorithm in F8 mode */
	RTE_CRYPTO_CIPHER_AES_XTS,
	/**< AES algorithm in XTS mode */

	RTE_CRYPTO_CIPHER_ARC4,
	/**< (A)RC4 cipher algorithm */

	RTE_CRYPTO_CIPHER_KASUMI_F8,
	/**< KASUMI algorithm in F8 mode */

	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	/**< SNOW 3G algorithm in UEA2 mode */

	RTE_CRYPTO_CIPHER_ZUC_EEA3,
	/**< ZUC algorithm in EEA3 mode */

	RTE_CRYPTO_CIPHER_DES_CBC,
	/**< DES algorithm in CBC mode */

	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
	/**< AES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_DES_DOCSISBPI
	/**< DES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */
};

/** Cipher algorithm name strings */
extern const char *
rte_crypto_cipher_algorithm_strings[];

/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};

/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];

/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key
	 *
	 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the
	 * original key data provided may be wrapped (encrypted) using a key
	 * wrap algorithm such as AES key wrap (RFC 3394), and hence the
	 * length of the key may increase beyond the PMD's advertised
	 * supported key size.
	 * The PMD shall validate the key length and report an EMSGSIZE error
	 * while configuring the session, so the application can skip checking
	 * the capability key length in such cases.
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to total length
	 *    of the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this. Note that the PMDs may
		 * modify the memory reserved (the first byte and the
		 * final padding).
		 *
		 * - For AES-XTS, this is the 128bit tweak, i, from
		 * IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * length of the IV (which must be the same as the
		 * block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 * of the counter (which must be the same as the block
		 * length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint32_t dataunit_len;
	/**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is enabled,
	 * this is the data-unit length of the algorithm,
	 * otherwise or when the value is 0, use the operation length.
	 * The value should be in the range defined by the dataunit_set field
	 * in the cipher capability.
	 *
	 * - For AES-XTS it is the size of data-unit, from IEEE Std 1619-2007.
	 * For each data-unit in the operation, the tweak (IV) value is
	 * assigned consecutively starting from the operation assigned IV.
	 */
};
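
/*
 * Usage sketch (illustrative): an AES-128-CBC encrypt transform. The key
 * buffer and IV_OFFSET are hypothetical; IV_OFFSET is typically the byte
 * offset of the per-op IV within the rte_crypto_op private area, and the
 * application copies the IV into each op at that offset.
 *
 *	static const uint8_t key[16] = { 0 };
 *
 *	struct rte_crypto_cipher_xform c = {
 *		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.key = { .data = key, .length = sizeof(key) },
 *		.iv = { .offset = IV_OFFSET, .length = 16 },
 *	};
 */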

/** Symmetric Authentication / Hash Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC
	/**< HMAC using 512 bit SHA3 algorithm. */
};

/** Authentication algorithm name strings */
extern const char *
rte_crypto_auth_algorithm_strings[];

/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};

/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];

/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode and for ZUC in EIA3 mode,
		 * this is the authentication Initialisation Vector
		 * (IV) value. For AES-GMAC IV description please refer
		 * to the field `length` in iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 * algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW3G in UIA2 mode, for ZUC in EIA3 mode and
		 * for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 * algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 * 1) Number greater or equal to one, which means that IV
		 *    is used and J0 will be computed internally, a minimum
		 *    of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed where J0 is defined
		 *    by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};
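
/*
 * Usage sketch (illustrative): an HMAC-SHA-1 digest-generation transform
 * truncated to 96 bits (HMAC-SHA-1-96, per the note on
 * RTE_CRYPTO_AUTH_SHA1_HMAC above). The key buffer is hypothetical.
 *
 *	static const uint8_t hmac_key[20] = { 0 };
 *
 *	struct rte_crypto_auth_xform a = {
 *		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.key = { .data = hmac_key, .length = sizeof(hmac_key) },
 *		.digest_length = 12,
 *	};
 */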

/** Symmetric AEAD Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
	/**< Chacha20 cipher with poly1305 authenticator */
};

/** AEAD algorithm name strings */
extern const char *
rte_crypto_aead_algorithm_strings[];

/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];

struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this.
		 *
		 * - For Chacha20-Poly1305 it is a 96-bit nonce.
		 * The PMD sets the initial counter for the Poly1305 key
		 * generation part to 0 and for the Chacha20 encryption
		 * to 1, as per RFC 8439 section 2.8 (AEAD construction).
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 * 1) Number greater or equal to one, which means that IV
		 *    is used and J0 will be computed internally, a minimum
		 *    of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed where J0 is defined
		 *    by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 *
		 * - For Chacha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. */

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * it is required to reserve 18 bytes before the AAD and padding
	 * at the end of it, so a multiple of 16 bytes is allocated.
	 */
};
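
/*
 * Usage sketch (illustrative): an AES-128-GCM encrypt transform with a
 * 96-bit IV (J0 computed internally, per the iv.length description above),
 * a 16-byte tag and 8 bytes of AAD. The key buffer and IV_OFFSET are
 * hypothetical.
 *
 *	static const uint8_t key[16] = { 0 };
 *
 *	struct rte_crypto_aead_xform g = {
 *		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *		.key = { .data = key, .length = sizeof(key) },
 *		.iv = { .offset = IV_OFFSET, .length = 12 },
 *		.digest_length = 16,
 *		.aad_length = 8,
 *	};
 */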

/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform */
};

/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform; the type field is used to specify
 * which transform is contained within the union.
 */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */

	RTE_STD_C11
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};
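
/*
 * Usage sketch (illustrative): chaining two xforms so that ciphering is
 * performed first and authentication second; the chain order expresses the
 * processing order. The per-xform contents are elided.
 *
 *	struct rte_crypto_sym_xform auth_x = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *	};
 *	struct rte_crypto_sym_xform cipher_x = {
 *		.next = &auth_x,
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *	};
 */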

struct rte_cryptodev_sym_session;

/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via m_src parameter,
 * which contains the source data which the crypto operation is to be performed
 * on.
 * While the mbuf is in use by a crypto operation no part of the mbuf should be
 * changed by the application as the device may read or write to any part of
 * the mbuf. In the case of hardware crypto devices some or all of the mbuf
 * may be DMAed in and out of the device, so the original data may be written
 * over, even though only the part specified by the rte_crypto_sym_op is to be
 * transformed.
 *
 * Out-of-place (OOP) operation, where the source mbuf is different to the
 * destination mbuf, is a special case. Data will be copied from m_src to
 * m_dst. The part copied includes all the parts of the source mbuf that will
 * be operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data
 * around this indicated by the auth parameters will be copied unchanged from
 * source to destination.
 *
 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * to the data in the source buffer.
 */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	RTE_STD_C11
	union {
		struct rte_cryptodev_sym_session *session;
		/**< Handle for the initialised session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
		struct rte_security_session *sec_session;
		/**< Handle for the initialised security session context */
	};

	RTE_STD_C11
	union {
		struct {
			struct {
				uint32_t offset;
				 /**< Starting point for AEAD processing, specified as
				  * number of bytes from start of packet in source
				  * buffer.
				  */
				uint32_t length;
				 /**< The message length, in bytes, of the source buffer
				  * on which the cryptographic operation will be
				  * computed. This must be a multiple of the block size.
				  */
			} data; /**< Data offsets and length for AEAD */
			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_aead_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest; /**< Digest parameters */
			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM)
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should setup this field as follows:
				 *
				 * - the additional authentication data itself should
				 * be written starting at an offset of 18 bytes into
				 * the array, leaving room for the first block (16 bytes)
				 * and the length encoding in the first two bytes of the
				 * second block.
				 *
				 * - the array should be big enough to hold the above
				 * fields, plus any padding to round this up to the
				 * nearest multiple of the block size (16 bytes).
				 * Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 * (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should setup this field as follows:
				 *
				 * - the AAD is written in starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 * any space to round this up to the nearest multiple
				 * of the block size (16 bytes).
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;

		struct {
			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for cipher processing,
					  * specified as number of bytes from start
					  * of data in the source buffer.
					  * The result of the cipher operation will be
					  * written back into the output buffer
					  * starting at this location.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the
					  * source buffer on which the cryptographic
					  * operation will be computed.
					  * This is also the same as the result length.
					  * This must be a multiple of the block size
					  * or a multiple of data-unit length
					  * as described in xform.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
				} data; /**< Data offsets and length for ciphering */
			} cipher;

			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for hash processing,
					  * specified as number of bytes from start of
					  * packet in source buffer.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * this offset should be such that
					  * data to authenticate starts at COUNT.
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * offset is the DOCSIS header length
					  * and, therefore, also the CRC offset
					  * i.e. the number of bytes into the
					  * packet at which CRC calculation
					  * should begin.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the source
					  * buffer that the hash will be computed on.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * the length should include the COUNT,
					  * FRESH, message, direction bit and padding
					  * (to be multiple of 8 bits).
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * is the CRC length i.e. the number of
					  * bytes in the packet over which the
					  * CRC should be calculated.
					  */
				} data;
				/**< Data offsets and length for authentication */

				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * Digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against appended,
					 * decrypted digest, can be performed
					 * using decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * chained xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - caller must allocate at least
					 * digest_length of memory at the end of
					 * source and (in case of out-of-place
					 * operations) destination buffer; those
					 * buffers can be linear or split using
					 * scatter-gather lists,
					 * - digest data pointer must point to
					 * the end of source or (in case of
					 * out-of-place operations) destination
					 * data, which is pointer to the
					 * data buffer + auth.data.offset +
					 * auth.data.length,
					 * - cipher.data.offset +
					 * cipher.data.length must be greater
					 * than auth.data.offset +
					 * auth.data.length and is typically
					 * equal to auth.data.offset +
					 * auth.data.length + digest_length.
					 * - for wireless algorithms, i.e.
					 * SNOW 3G, KASUMI and ZUC, as the
					 * cipher.data.length,
					 * cipher.data.offset,
					 * auth.data.length and
					 * auth.data.offset are in bits, they
					 * must be 8-bit multiples.
					 *
					 * Note, that for security reasons, it
					 * is PMDs' responsibility to not
					 * leave an unencrypted digest in any
					 * buffer after performing auth-cipher
					 * operations.
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest; /**< Digest parameters */
			} auth;
		};
	};
};
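
/*
 * Usage sketch (illustrative): filling the AEAD branch of a symmetric op,
 * placing the tag right after the ciphertext in the same mbuf. The
 * variables op, m, plen, aad_buf and aad_iova are hypothetical,
 * application-prepared values.
 *
 *	struct rte_crypto_sym_op *sop = op->sym;
 *
 *	sop->m_src = m;
 *	sop->aead.data.offset = 0;
 *	sop->aead.data.length = plen;
 *	sop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, plen);
 *	sop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m, plen);
 *	sop->aead.aad.data = aad_buf;
 *	sop->aead.aad.phys_addr = aad_iova;
 */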

/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param	op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}

/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns pointer to first crypto xform in crypto operations
 *   chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}
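
/*
 * Usage sketch (illustrative): carving two chained xforms out of a
 * caller-provided area large enough for two contiguous xform structures
 * (sop is a hypothetical, already-initialised symmetric op).
 *
 *	struct rte_crypto_sym_xform priv[2];
 *	struct rte_crypto_sym_xform *x =
 *		__rte_crypto_sym_op_sym_xforms_alloc(sop, priv, 2);
 *
 *	x->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	x->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
 */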

/**
 * Attach a session to a symmetric crypto operation
 *
 * @param	sym_op	crypto operation
 * @param	sess	cryptodev session
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
		struct rte_cryptodev_sym_session *sess)
{
	sym_op->session = sess;

	return 0;
}

/**
 * Converts portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in *vec* array.
 * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
 *
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of an output array.
 * @return
 *   - number of successfully filled entries in *vec* array.
 *   - negative number of elements in *vec* array required.
 */
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {

		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);
		/* tot_len is derived from this segment, not the first one */
		vec[i].tot_len = nseg->buf_len - rte_pktmbuf_headroom(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			i++;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i;
}
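
/*
 * Usage sketch (illustrative): converting the first len bytes of a possibly
 * segmented mbuf m into an SGL for the raw data path APIs. A negative
 * return is minus the number of vector entries that would be required.
 *
 *	struct rte_crypto_vec vec[8];
 *	struct rte_crypto_sgl sgl;
 *	int n;
 *
 *	n = rte_crypto_mbuf_to_vec(m, 0, len, vec, RTE_DIM(vec));
 *	if (n < 0)
 *		return n;
 *	sgl.vec = vec;
 *	sgl.num = n;
 */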

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */