/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_
/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */
#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>
/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Intended to be used to pass input/output data buffers for crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
};
/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and the size of that array.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};
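
/*
 * Example (illustrative sketch, not part of the API): describing a single
 * contiguous buffer with one rte_crypto_vec and wrapping it in an SGL.
 * The variables `buf` and `buf_len` are hypothetical.
 *
 * @code
 * struct rte_crypto_vec v = {
 *	.base = buf,
 *	.iova = rte_mem_virt2iova(buf),
 *	.len = buf_len,
 * };
 * struct rte_crypto_sgl sgl = { .vec = &v, .num = 1 };
 * @endcode
 */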
/**
 * Crypto virtual and IOVA address descriptor, used to describe a cryptographic
 * data buffer without the length information. The length information is
 * normally predefined during session creation.
 */
struct rte_crypto_va_iova_ptr {
	void *va;
	rte_iova_t iova;
};
/**
 * Raw data operation descriptor.
 * Intended for use with the synchronous CPU crypto API call or the
 * asynchronous RAW data path API call.
 */
struct rte_crypto_sym_vec {
	/** number of operations to perform */
	uint32_t num;
	/** array of SGL vectors */
	struct rte_crypto_sgl *sgl;
	/** array of pointers to cipher IV */
	struct rte_crypto_va_iova_ptr *iv;
	/** array of pointers to digest */
	struct rte_crypto_va_iova_ptr *digest;

	__extension__
	union {
		/** array of pointers to auth IV, used for chain operation */
		struct rte_crypto_va_iova_ptr *auth_iv;
		/** array of pointers to AAD, used for AEAD operation */
		struct rte_crypto_va_iova_ptr *aad;
	};

	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error
	 */
	int32_t *status;
};
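
/*
 * Example (illustrative sketch): preparing a one-operation
 * rte_crypto_sym_vec for the synchronous CPU crypto path. `sgl` is a
 * hypothetical, previously filled struct rte_crypto_sgl; `cipher_iv` and
 * `digest` are hypothetical struct rte_crypto_va_iova_ptr variables, and
 * `st` is an int32_t for the per-operation status.
 *
 * @code
 * struct rte_crypto_sym_vec vec = {
 *	.num = 1,
 *	.sgl = &sgl,
 *	.iv = &cipher_iv,
 *	.digest = &digest,
 *	.status = &st,
 * };
 * @endcode
 */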
/**
 * used for cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
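
/*
 * Example (illustrative sketch): for a cipher+auth chain where `hdr`
 * (hypothetical) header bytes are authenticated but excluded from
 * ciphering, the relative head/tail offsets can be packed as below.
 *
 * @code
 * union rte_crypto_sym_ofs ofs = { .raw = 0 };
 * ofs.ofs.cipher.head = hdr;	// bytes skipped at the head for ciphering
 * ofs.ofs.cipher.tail = 0;	// no bytes excluded at the tail
 * @endcode
 */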
/** Symmetric Cipher Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_cipher_algorithm {
	RTE_CRYPTO_CIPHER_NULL = 1,
	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */

	RTE_CRYPTO_CIPHER_3DES_CBC,
	/**< Triple DES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_3DES_CTR,
	/**< Triple DES algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_3DES_ECB,
	/**< Triple DES algorithm in ECB mode */

	RTE_CRYPTO_CIPHER_AES_CBC,
	/**< AES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_AES_CTR,
	/**< AES algorithm in Counter mode */
	RTE_CRYPTO_CIPHER_AES_ECB,
	/**< AES algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_AES_F8,
	/**< AES algorithm in F8 mode */
	RTE_CRYPTO_CIPHER_AES_XTS,
	/**< AES algorithm in XTS mode */

	RTE_CRYPTO_CIPHER_ARC4,
	/**< (A)RC4 cipher algorithm */

	RTE_CRYPTO_CIPHER_KASUMI_F8,
	/**< KASUMI algorithm in F8 mode */

	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	/**< SNOW 3G algorithm in UEA2 mode */

	RTE_CRYPTO_CIPHER_ZUC_EEA3,
	/**< ZUC algorithm in EEA3 mode */

	RTE_CRYPTO_CIPHER_DES_CBC,
	/**< DES algorithm in CBC mode */

	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
	/**< AES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_DES_DOCSISBPI
	/**< DES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */
};
/** Cipher algorithm name strings */
extern const char *
rte_crypto_cipher_algorithm_strings[];
/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};
/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];
/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	RTE_STD_C11
	union { /* temporary anonymous union for ABI compatibility */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key
	 *
	 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the
	 * original key data provided may be wrapped (encrypted) using a key
	 * wrap algorithm such as AES key wrap (RFC 3394), and hence the length
	 * of the key may increase beyond the PMD's advertised supported key
	 * size. The PMD shall validate the key length and report an EMSGSIZE
	 * error while configuring the session, and the application can skip
	 * checking the capability key length in such cases.
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC 3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to the total
	 *    length of the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */
	RTE_STD_C11
	struct { /* temporary anonymous struct for ABI compatibility */
		const uint8_t *_key_data; /* reserved for key.data union */
		uint16_t _key_length; /* reserved for key.length union */
		/* next field can fill the padding hole */

		uint16_t dataunit_len;
		/**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is
		 * enabled, this is the data-unit length of the algorithm,
		 * otherwise or when the value is 0, use the operation length.
		 * The value should be in the range defined by the dataunit_set
		 * field in the cipher capability.
		 *
		 * - For AES-XTS it is the size of the data-unit, from
		 *   IEEE Std 1619-2007.
		 *   For each data-unit in the operation, the tweak (IV) value
		 *   is assigned consecutively, starting from the operation's
		 *   assigned IV.
		 */
	}; }; /* temporary struct nested in union for ABI compatibility */
	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 *   in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 *   Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 *   nonce should be written starting at &iv[1] (to allow
		 *   space for the implementation to write in the flags
		 *   in the first byte). Note that a full 16 bytes should
		 *   be allocated, even though the length field will
		 *   have a value less than this. Note that the PMDs may
		 *   modify the memory reserved (the first byte and the
		 *   final padding).
		 *
		 * - For AES-XTS, this is the 128bit tweak, i, from
		 *   IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 *   in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 *   length of the IV (which must be the same as the
		 *   block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 *   of the counter (which must be the same as the block
		 *   length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 *   which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */
};
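
/*
 * Example (illustrative sketch): filling a cipher transform for AES-128-CBC
 * encryption. `aes_key` is a hypothetical 16-byte array; the IV is expected
 * at offset IV_OFFSET (hypothetical) within the rte_crypto_op.
 *
 * @code
 * struct rte_crypto_sym_xform cipher_xform = {
 *	.next = NULL,
 *	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *	.cipher = {
 *		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.key = { .data = aes_key, .length = 16 },
 *		.iv = { .offset = IV_OFFSET, .length = 16 },
 *	},
 * };
 * @endcode
 */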
/** Symmetric Authentication / Hash Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC
	/**< HMAC using 512 bit SHA3 algorithm. */
};
/** Authentication algorithm name strings */
extern const char *
rte_crypto_auth_algorithm_strings[];
/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};
/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];
/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */
	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode
		 *   this is the authentication Initialisation Vector
		 *   (IV) value. For a description of the AES-GMAC IV,
		 *   please refer to the `length` field of the iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW3G in UIA2 mode, for ZUC in EIA3 mode and
		 *   for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 *   1) Number greater or equal to one, which means that IV
		 *      is used and J0 will be computed internally, a minimum
		 *      of 16 bytes must be allocated.
		 *   2) Zero, in which case data points to J0. In this case
		 *      16 bytes of J0 should be passed where J0 is defined
		 *      by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */
	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};
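
/*
 * Example (illustrative sketch): filling an authentication transform for
 * HMAC-SHA-256 digest generation. `hmac_key` and `hmac_key_len` are
 * hypothetical; a 16-byte digest_length requests a truncated HMAC output.
 *
 * @code
 * struct rte_crypto_sym_xform auth_xform = {
 *	.next = NULL,
 *	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *	.auth = {
 *		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 *		.key = { .data = hmac_key, .length = hmac_key_len },
 *		.digest_length = 16,
 *	},
 * };
 * @endcode
 */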
/** Symmetric AEAD Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
	/**< Chacha20 cipher with poly1305 authenticator */
};
/** AEAD algorithm name strings */
extern const char *
rte_crypto_aead_algorithm_strings[];
/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];
struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 *   nonce should be written starting at &iv[1] (to allow
		 *   space for the implementation to write in the flags
		 *   in the first byte). Note that a full 16 bytes should
		 *   be allocated, even though the length field will
		 *   have a value less than this.
		 *
		 * - For Chacha20-Poly1305 it is a 96-bit nonce.
		 *   PMD sets initial counter for Poly1305 key generation
		 *   part to 0 and for Chacha20 encryption to 1 as per
		 *   rfc8439 2.8. AEAD construction.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 *   1) Number greater or equal to one, which means that IV
		 *      is used and J0 will be computed internally, a minimum
		 *      of 16 bytes must be allocated.
		 *   2) Zero, in which case data points to J0. In this case
		 *      16 bytes of J0 should be passed where J0 is defined
		 *      by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 *   which can be in the range 7 to 13 inclusive.
		 *
		 * - For Chacha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */
	uint16_t digest_length;

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * it is required to reserve 18 bytes before the AAD and padding
	 * at the end of it, so a multiple of 16 bytes is allocated.
	 */
};
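
/*
 * Example (illustrative sketch): filling an AEAD transform for AES-128-GCM
 * encryption. `gcm_key` is a hypothetical 16-byte array; a 12-byte IV at
 * IV_OFFSET (hypothetical), a 16-byte tag and 8 bytes of AAD are assumed.
 *
 * @code
 * struct rte_crypto_sym_xform aead_xform = {
 *	.next = NULL,
 *	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *	.aead = {
 *		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *		.key = { .data = gcm_key, .length = 16 },
 *		.iv = { .offset = IV_OFFSET, .length = 12 },
 *		.digest_length = 16,
 *		.aad_length = 8,
 *	},
 * };
 * @endcode
 */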
/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform */
};
/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform, and the type field is used to
 * specify which transform is contained within the union.
 */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */

	RTE_STD_C11
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};
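
/*
 * Example (illustrative sketch): chaining two transforms so that ciphering
 * runs first and digest generation second (cipher-then-auth), reusing the
 * hypothetical cipher_xform and auth_xform variables from the examples
 * above.
 *
 * @code
 * cipher_xform.next = &auth_xform;
 * auth_xform.next = NULL;
 * // the chain handed to session creation starts at &cipher_xform
 * @endcode
 */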
struct rte_cryptodev_sym_session;
/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
 * which contains the source data which the crypto operation is to be performed
 * on.
 * While the mbuf is in use by a crypto operation no part of the mbuf should be
 * changed by the application as the device may read or write to any part of the
 * mbuf. In the case of hardware crypto devices some or all of the mbuf
 * may be DMAed in and out of the device, writing over the original data;
 * however, only the part specified by the rte_crypto_sym_op for transformation
 * will be overwritten.
 *
 * Out-of-place (OOP) operation, where the source mbuf is different from the
 * destination mbuf, is a special case. Data will be copied from m_src to m_dst.
 * The part copied includes all the parts of the source mbuf that will be
 * operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data around
 * this indicated by the auth parameters will be copied unchanged from source to
 * destination mbuf.
 *
 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * to the data in the source buffer.
 */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	RTE_STD_C11
	union {
		struct rte_cryptodev_sym_session *session;
		/**< Handle for the initialised session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
		struct rte_security_session *sec_session;
		/**< Handle for the initialised security session context */
	};
	RTE_STD_C11
	union {
		struct {
			struct {
				uint32_t offset;
				/**< Starting point for AEAD processing, specified as
				 * number of bytes from start of packet in source
				 * buffer.
				 */
				uint32_t length;
				/**< The message length, in bytes, of the source buffer
				 * on which the cryptographic operation will be
				 * computed. This must be a multiple of the block size.
				 */
			} data; /**< Data offsets and length for AEAD */

			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_aead_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest; /**< Digest parameters */

			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM).
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should set up this field as follows:
				 *
				 * - the additional authentication data itself should
				 * be written starting at an offset of 18 bytes into
				 * the array, leaving room for the first block (16 bytes)
				 * and the length encoding in the first two bytes of the
				 * second block.
				 *
				 * - the array should be big enough to hold the above
				 * fields, plus any padding to round this up to the
				 * nearest multiple of the block size (16 bytes).
				 * Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 * (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should set up this field as follows:
				 *
				 * - the AAD is written starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 * any space to round this up to the nearest multiple
				 * of the block size (16 bytes).
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;
		struct {
			struct {
				struct {
					uint32_t offset;
					/**< Starting point for cipher processing,
					 * specified as number of bytes from start
					 * of data in the source buffer.
					 * The result of the cipher operation will be
					 * written back into the output buffer
					 * starting at this location.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * 8-bit aligned.
					 */
					uint32_t length;
					/**< The message length, in bytes, of the
					 * source buffer on which the cryptographic
					 * operation will be computed.
					 * This is also the same as the result length.
					 * This must be a multiple of the block size
					 * or a multiple of the data-unit length
					 * as described in the xform.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * 8-bit aligned.
					 */
				} data; /**< Data offsets and length for ciphering */
			} cipher;
			struct {
				struct {
					uint32_t offset;
					/**< Starting point for hash processing,
					 * specified as number of bytes from start of
					 * packet in source buffer.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * 8-bit aligned.
					 *
					 * @note
					 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					 * this offset should be such that
					 * data to authenticate starts at COUNT.
					 *
					 * @note
					 * For DOCSIS security protocol, this
					 * offset is the DOCSIS header length
					 * and, therefore, also the CRC offset
					 * i.e. the number of bytes into the
					 * packet at which CRC calculation
					 * should begin.
					 */
					uint32_t length;
					/**< The message length, in bytes, of the source
					 * buffer that the hash will be computed on.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * 8-bit aligned.
					 *
					 * @note
					 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					 * the length should include the COUNT,
					 * FRESH, message, direction bit and padding
					 * (to be a multiple of 8 bits).
					 *
					 * @note
					 * For DOCSIS security protocol, this
					 * is the CRC length i.e. the number of
					 * bytes in the packet over which the
					 * CRC should be calculated.
					 */
				} data;
				/**< Data offsets and length for authentication */
				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * Digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against an appended,
					 * decrypted digest can be performed
					 * using decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * chained xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - caller must allocate at least
					 * digest_length of memory at the end of
					 * source and (in case of out-of-place
					 * operations) destination buffer; those
					 * buffers can be linear or split using
					 * scatter-gather lists,
					 * - digest data pointer must point to
					 * the end of source or (in case of
					 * out-of-place operations) destination
					 * data, which is a pointer to the
					 * data buffer + auth.data.offset +
					 * auth.data.length,
					 * - cipher.data.offset +
					 * cipher.data.length must be greater
					 * than auth.data.offset +
					 * auth.data.length and is typically
					 * equal to auth.data.offset +
					 * auth.data.length + digest_length,
					 * - for wireless algorithms, i.e.
					 * SNOW 3G, KASUMI and ZUC, as the
					 * cipher.data.length,
					 * cipher.data.offset,
					 * auth.data.length and
					 * auth.data.offset are in bits, they
					 * must be 8-bit multiples.
					 *
					 * Note, that for security reasons, it
					 * is the PMD's responsibility to not
					 * leave an unencrypted digest in any
					 * buffer after performing auth-cipher
					 * operations.
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest; /**< Digest parameters */
			} auth;
		};
	};
};
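
/*
 * Example (illustrative sketch): setting per-operation offsets on a crypto
 * op for a cipher+auth chain over one mbuf `m`, assuming hypothetical
 * HDR_LEN header bytes that are authenticated but not encrypted, a payload
 * of `plen` bytes and a digest placed right after the payload.
 *
 * @code
 * struct rte_crypto_sym_op *sop = op->sym;	// op is an rte_crypto_op
 * sop->m_src = m;
 * sop->auth.data.offset = 0;
 * sop->auth.data.length = HDR_LEN + plen;
 * sop->cipher.data.offset = HDR_LEN;
 * sop->cipher.data.length = plen;
 * sop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 *	HDR_LEN + plen);
 * sop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, HDR_LEN + plen);
 * @endcode
 */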
/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param	op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}
/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns pointer to first crypto xform in crypto operations chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}
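
/*
 * Example (illustrative sketch): carving two chained xforms out of the
 * private data area that follows the symmetric op inside an rte_crypto_op.
 * `op` is a hypothetical rte_crypto_op whose private data area begins
 * immediately after the symmetric op.
 *
 * @code
 * struct rte_crypto_sym_xform *xf =
 *	__rte_crypto_sym_op_sym_xforms_alloc(op->sym, op->sym + 1, 2);
 * xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 * xf->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
 * @endcode
 */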
/**
 * Attach a session to a symmetric crypto operation
 *
 * @param	sym_op	crypto operation
 * @param	sess	cryptodev session
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
		struct rte_cryptodev_sym_session *sess)
{
	sym_op->session = sess;

	return 0;
}
/**
 * Converts a portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in the *vec* array.
 * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
 *
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of an output array.
 * @return
 *   - number of successfully filled entries in the *vec* array.
 *   - negative number of elements in the *vec* array required.
 */
__rte_experimental
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i + 1;
}
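
/*
 * Example (illustrative sketch): gathering `plen` payload bytes that start
 * `off` bytes into a (possibly segmented) mbuf `m`. `MAX_SEGS` is a
 * hypothetical bound on the number of segments.
 *
 * @code
 * struct rte_crypto_vec vec[MAX_SEGS];
 * int n = rte_crypto_mbuf_to_vec(m, off, plen, vec, RTE_DIM(vec));
 * if (n < 0)
 *	return n;	// -n entries would be required in vec
 * struct rte_crypto_sgl sgl = { .vec = vec, .num = n };
 * @endcode
 */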
#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */