/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_

/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>
/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Meant to be used to pass input/output data buffers to crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
};
/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and its size.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};
/**
 * Synchronous operation descriptor.
 * Meant to be used with the CPU crypto API calls.
 */
struct rte_crypto_sym_vec {
	/** array of SGL vectors */
	struct rte_crypto_sgl *sgl;
	/** array of pointers to IV */
	void **iv;
	/** array of pointers to AAD */
	void **aad;
	/** array of pointers to digest */
	void **digest;
	/**
	 * array of statuses for each operation:
	 *  - 0 on success
	 *  - errno on error
	 */
	int32_t *status;
	/** number of operations to perform */
	uint32_t num;
};
/**
 * Used with cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
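
/*
 * Usage sketch (illustrative, not part of the API): describe an operation
 * where cipher processing starts 16 bytes after the start of auth processing
 * and both end together. The offset values below are arbitrary examples; the
 * exact interpretation of the head/tail fields is defined by the consuming
 * CPU crypto API.
 *
 * @code
 *	union rte_crypto_sym_ofs ofs;
 *
 *	ofs.raw = 0;
 *	ofs.ofs.cipher.head = 16;	// skip 16B of header before ciphering
 *	ofs.ofs.cipher.tail = 0;	// cipher up to the end of the data
 * @endcode
 */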
/** Symmetric Cipher Algorithms */
enum rte_crypto_cipher_algorithm {
	RTE_CRYPTO_CIPHER_NULL = 1,
	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */

	RTE_CRYPTO_CIPHER_3DES_CBC,
	/**< Triple DES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_3DES_CTR,
	/**< Triple DES algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_3DES_ECB,
	/**< Triple DES algorithm in ECB mode */

	RTE_CRYPTO_CIPHER_AES_CBC,
	/**< AES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_AES_CTR,
	/**< AES algorithm in Counter mode */
	RTE_CRYPTO_CIPHER_AES_ECB,
	/**< AES algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_AES_F8,
	/**< AES algorithm in F8 mode */
	RTE_CRYPTO_CIPHER_AES_XTS,
	/**< AES algorithm in XTS mode */

	RTE_CRYPTO_CIPHER_ARC4,
	/**< (A)RC4 cipher algorithm */

	RTE_CRYPTO_CIPHER_KASUMI_F8,
	/**< KASUMI algorithm in F8 mode */

	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	/**< SNOW 3G algorithm in UEA2 mode */

	RTE_CRYPTO_CIPHER_ZUC_EEA3,
	/**< ZUC algorithm in EEA3 mode */

	RTE_CRYPTO_CIPHER_DES_CBC,
	/**< DES algorithm in CBC mode */

	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
	/**< AES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
	/**< DES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_LIST_END
};
/** Cipher algorithm name strings */
extern const char *
rte_crypto_cipher_algorithm_strings[];
/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};
/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];
/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to the total
	 *    length of the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this. Note that the PMDs may
		 * modify the memory reserved (the first byte and the
		 * final padding).
		 *
		 * - For AES-XTS, this is the 128bit tweak, i, from
		 * IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * length of the IV (which must be the same as the
		 * block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 * of the counter (which must be the same as the block
		 * length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */
};
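
/*
 * Setup sketch (illustrative): an AES-128-CBC encrypt transform. The key
 * bytes are placeholder values and IV_OFFSET is an application-defined
 * offset into the crypto operation, typically
 * sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op).
 *
 * @code
 *	static const uint8_t key[16] = { 0 };	// example key material
 *
 *	struct rte_crypto_cipher_xform cipher = {
 *		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.key = { .data = key, .length = sizeof(key) },
 *		.iv = { .offset = IV_OFFSET, .length = 16 },
 *	};
 * @endcode
 */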
/** Symmetric Authentication / Hash Algorithms */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
	/**< HMAC using 512 bit SHA3 algorithm. */

	RTE_CRYPTO_AUTH_LIST_END
};
/** Authentication algorithm name strings */
extern const char *
rte_crypto_auth_algorithm_strings[];
/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};
/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];
/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode and for ZUC in EIA3 mode,
		 * this is the authentication Initialisation Vector
		 * (IV) value. For the AES-GMAC IV description please
		 * refer to the field `length` in the iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 * algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
		 * for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 * algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 * 1) A number greater than or equal to one, which means
		 * that the IV is used and J0 will be computed internally;
		 * a minimum of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 * 16 bytes of J0 should be passed where J0 is defined
		 * by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};
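
/*
 * Setup sketch (illustrative): HMAC-SHA-256 digest generation with a
 * full-length (32-byte) digest. The key bytes are placeholder values and
 * the iv fields are left zeroed, as they are not used for HMAC.
 *
 * @code
 *	static const uint8_t hmac_key[32] = { 0 };	// example key material
 *
 *	struct rte_crypto_auth_xform auth = {
 *		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 *		.key = { .data = hmac_key, .length = sizeof(hmac_key) },
 *		.digest_length = 32,
 *	};
 * @endcode
 */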
/** Symmetric AEAD Algorithms */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
	/**< Chacha20 cipher with poly1305 authenticator */
	RTE_CRYPTO_AEAD_LIST_END
};
/** AEAD algorithm name strings */
extern const char *
rte_crypto_aead_algorithm_strings[];
/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];
/** Symmetric AEAD transform data. */
struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this.
		 *
		 * - For Chacha20-Poly1305 this is a 96-bit nonce.
		 * The PMD sets the initial counter for the Poly1305 key
		 * generation part to 0 and for Chacha20 encryption to 1,
		 * as per RFC 8439 section 2.8 (AEAD construction).
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 * 1) A number greater than or equal to one, which means
		 * that the IV is used and J0 will be computed internally;
		 * a minimum of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 * 16 bytes of J0 should be passed where J0 is defined
		 * by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 *
		 * - For Chacha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be generated or verified, in bytes. */

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * it is required to reserve 18 bytes before the AAD and padding
	 * at the end of it, so a multiple of 16 bytes is allocated.
	 */
};
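
/*
 * Setup sketch (illustrative): AES-128-GCM encryption with a 12-byte IV,
 * a 16-byte tag and 8 bytes of AAD. The key bytes are placeholder values
 * and IV_OFFSET is an application-defined offset into the crypto operation.
 *
 * @code
 *	static const uint8_t gcm_key[16] = { 0 };	// example key material
 *
 *	struct rte_crypto_aead_xform aead = {
 *		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *		.key = { .data = gcm_key, .length = sizeof(gcm_key) },
 *		.iv = { .offset = IV_OFFSET, .length = 12 },
 *		.digest_length = 16,
 *		.aad_length = 8,
 *	};
 * @endcode
 */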
/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform */
};
/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform; the type field is used to specify
 * which transform is contained within the union.
 */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */

	RTE_STD_C11
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};
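
/*
 * Chaining sketch (illustrative): cipher-then-authenticate for encryption.
 * The first xform in the chain is processed first; the last xform must have
 * next == NULL. The cipher/auth parameters themselves are omitted here.
 *
 * @code
 *	struct rte_crypto_sym_xform cipher_xform = { 0 };
 *	struct rte_crypto_sym_xform auth_xform = { 0 };
 *
 *	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
 *	cipher_xform.next = &auth_xform;
 *
 *	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
 *	auth_xform.next = NULL;
 * @endcode
 */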
struct rte_cryptodev_sym_session;
/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
 * which contains the source data on which the crypto operation is to be
 * performed.
 * While the mbuf is in use by a crypto operation no part of the mbuf should be
 * changed by the application, as the device may read or write to any part of
 * the mbuf. In the case of hardware crypto devices some or all of the mbuf
 * may be DMAed in and out of the device, potentially overwriting the original
 * data, though only the part specified by the rte_crypto_sym_op for
 * transformation will be changed.
 *
 * Out-of-place (OOP) operation, where the source mbuf is different from the
 * destination mbuf, is a special case. Data will be copied from m_src to m_dst.
 * The part copied includes all the parts of the source mbuf that will be
 * operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data around
 * this indicated by the auth parameters will be copied unchanged from source to
 * destination.
 *
 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf, this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * from the data in the source buffer.
 */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	RTE_STD_C11
	union {
		struct rte_cryptodev_sym_session *session;
		/**< Handle for the initialised session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
		struct rte_security_session *sec_session;
		/**< Handle for the initialised security session context */
	};
	RTE_STD_C11
	union {
		struct {
			struct {
				uint32_t offset;
				/**< Starting point for AEAD processing, specified as
				 * number of bytes from start of packet in source
				 * buffer.
				 */
				uint32_t length;
				/**< The message length, in bytes, of the source buffer
				 * on which the cryptographic operation will be
				 * computed. This must be a multiple of the block size.
				 */
			} data; /**< Data offsets and length for AEAD */
			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_auth_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest; /**< Digest parameters */
			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM).
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should set up this field as follows:
				 *
				 * - the additional authentication data itself should
				 * be written starting at an offset of 18 bytes into
				 * the array, leaving room for the first block (16 bytes)
				 * and the length encoding in the first two bytes of the
				 * second block.
				 *
				 * - the array should be big enough to hold the above
				 * fields, plus any padding to round this up to the
				 * nearest multiple of the block size (16 bytes).
				 * Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 * (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should set up this field as follows:
				 *
				 * - the AAD is written starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 * any space to round this up to the nearest multiple
				 * of the block size (16 bytes).
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;
		struct {
			struct {
				struct {
					uint32_t offset;
					/**< Starting point for cipher processing,
					 * specified as number of bytes from start
					 * of data in the source buffer.
					 * The result of the cipher operation will be
					 * written back into the output buffer
					 * starting at this location.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * an 8-bit multiple.
					 */
					uint32_t length;
					/**< The message length, in bytes, of the
					 * source buffer on which the cryptographic
					 * operation will be computed.
					 * This must be a multiple of the block size
					 * if a block cipher is being used. This is
					 * also the same as the result length.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * an 8-bit multiple.
					 */
				} data; /**< Data offsets and length for ciphering */
			} cipher;
			struct {
				struct {
					uint32_t offset;
					/**< Starting point for hash processing,
					 * specified as number of bytes from start of
					 * packet in source buffer.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * an 8-bit multiple.
					 *
					 * @note
					 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					 * this offset should be such that
					 * data to authenticate starts at COUNT.
					 *
					 * @note
					 * For the DOCSIS security protocol, this
					 * offset is the DOCSIS header length
					 * and, therefore, also the CRC offset,
					 * i.e. the number of bytes into the
					 * packet at which CRC calculation
					 * should begin.
					 */
					uint32_t length;
					/**< The message length, in bytes, of the source
					 * buffer that the hash will be computed on.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * an 8-bit multiple.
					 *
					 * @note
					 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					 * the length should include the COUNT,
					 * FRESH, message, direction bit and padding
					 * (to be a multiple of 8 bits).
					 *
					 * @note
					 * For the DOCSIS security protocol, this
					 * is the CRC length, i.e. the number of
					 * bytes in the packet over which the
					 * CRC should be calculated.
					 */
				} data;
				/**< Data offsets and length for authentication */
				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * A digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against an appended,
					 * decrypted digest can be performed
					 * using chained decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - the caller must allocate at least
					 * digest_length of memory at the end of
					 * the source and (in the case of
					 * out-of-place operations) destination
					 * buffers; those buffers can be linear
					 * or split using scatter-gather lists,
					 * - the digest data pointer must point to
					 * the end of the source or (in the case
					 * of out-of-place operations) destination
					 * data, which is a pointer to the
					 * data buffer + auth.data.offset +
					 * auth.data.length,
					 * - cipher.data.offset +
					 * cipher.data.length must be greater
					 * than auth.data.offset +
					 * auth.data.length and is typically
					 * equal to auth.data.offset +
					 * auth.data.length + digest_length,
					 * - for wireless algorithms, i.e.
					 * SNOW 3G, KASUMI and ZUC, as
					 * cipher.data.length,
					 * cipher.data.offset,
					 * auth.data.length and
					 * auth.data.offset are in bits, they
					 * must be 8-bit multiples.
					 *
					 * Note that, for security reasons, it
					 * is the PMDs' responsibility not to
					 * leave an unencrypted digest in any
					 * buffer after performing auth-cipher
					 * operations.
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest; /**< Digest parameters */
			} auth;
		};
	};
};
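
/*
 * Fill-in sketch (illustrative): byte offsets/lengths for an in-place
 * cipher operation that leaves a hypothetical HDR_LEN-byte header in the
 * clear. Byte units assume a block cipher rather than a wireless
 * (bit-level) algorithm; `op` is assumed to be an rte_crypto_op whose
 * sym member points at this structure.
 *
 * @code
 *	struct rte_crypto_sym_op *sym_op = op->sym;
 *
 *	sym_op->m_src = m;
 *	sym_op->m_dst = NULL;	// in-place operation
 *	sym_op->cipher.data.offset = HDR_LEN;
 *	sym_op->cipher.data.length = rte_pktmbuf_pkt_len(m) - HDR_LEN;
 * @endcode
 */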
/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}
/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns a pointer to the first crypto xform in the crypto
 *   operation's chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}
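
/*
 * Usage sketch (illustrative): carve two chained xforms out of a private
 * data area reserved after the symmetric operation. The layout of the
 * private data area is application-defined; `(sym_op + 1)` is just one
 * possible convention.
 *
 * @code
 *	void *priv = sym_op + 1;	// example private data location
 *	struct rte_crypto_sym_xform *xf =
 *		__rte_crypto_sym_op_sym_xforms_alloc(sym_op, priv, 2);
 *
 *	if (xf != NULL) {
 *		xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *		xf->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	}
 * @endcode
 */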
/**
 * Attach a session to a symmetric crypto operation.
 *
 * @param sym_op	crypto operation
 * @param sess		cryptodev session
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
		struct rte_cryptodev_sym_session *sess)
{
	sym_op->session = sess;

	return 0;
}
/**
 * Converts a portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in the *vec* array.
 * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
 *
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of the output array.
 * @return
 *   - number of successfully filled entries in the *vec* array.
 *   - negative number of elements in the *vec* array required.
 */
__rte_experimental
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {

		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i + 1;
}
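
/*
 * Usage sketch (illustrative): gather the first `len` bytes of a possibly
 * segmented mbuf `m` into an SGL for the CPU crypto data-path. The array
 * size of 8 is an arbitrary example bound on the segment count.
 *
 * @code
 *	struct rte_crypto_vec vec[8];
 *	struct rte_crypto_sgl sgl;
 *	int rc;
 *
 *	rc = rte_crypto_mbuf_to_vec(m, 0, len, vec, RTE_DIM(vec));
 *	if (rc < 0)
 *		return rc;	// vec[] needs -rc entries
 *	sgl.vec = vec;
 *	sgl.num = rc;
 * @endcode
 */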
#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */