/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_

/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>

/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Intended to be used to pass input/output data buffers to crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
	/** total buffer length */
	uint32_t tot_len;
};

/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and its size.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};

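/*
 * Illustrative sketch, not part of the DPDK API: describing two flat,
 * physically contiguous buffers as a scatter-gather list. The buffer
 * addresses and IOVAs are assumed to be supplied by the caller; the
 * "__rte_crypto_example_" name is hypothetical.
 */
static inline void
__rte_crypto_example_fill_sgl(struct rte_crypto_sgl *sgl,
		struct rte_crypto_vec vec[2],
		void *va0, rte_iova_t iova0, uint32_t len0,
		void *va1, rte_iova_t iova1, uint32_t len1)
{
	/* one vector per contiguous segment */
	vec[0].base = va0;
	vec[0].iova = iova0;
	vec[0].len = len0;
	vec[0].tot_len = len0;

	vec[1].base = va1;
	vec[1].iova = iova1;
	vec[1].len = len1;
	vec[1].tot_len = len1;

	/* the SGL is just the vector array plus its element count */
	sgl->vec = vec;
	sgl->num = 2;
}
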
/**
 * Crypto virtual and IOVA address descriptor, used to describe a cryptographic
 * data buffer without the length information. The length information is
 * normally predefined during session creation.
 */
struct rte_crypto_va_iova_ptr {
	void *va;
	rte_iova_t iova;
};

/**
 * Raw data operation descriptor.
 * Intended to be used with the synchronous CPU crypto API or the
 * asynchronous RAW data path API.
 */
struct rte_crypto_sym_vec {
	/** number of operations to perform */
	uint32_t num;
	/** array of SGL vectors */
	struct rte_crypto_sgl *src_sgl;
	/** array of SGL vectors for OOP, keep it NULL for in-place */
	struct rte_crypto_sgl *dest_sgl;
	/** array of pointers to cipher IV */
	struct rte_crypto_va_iova_ptr *iv;
	/** array of pointers to digest */
	struct rte_crypto_va_iova_ptr *digest;

	__extension__
	union {
		/** array of pointers to auth IV, used for chain operation */
		struct rte_crypto_va_iova_ptr *auth_iv;
		/** array of pointers to AAD, used for AEAD operation */
		struct rte_crypto_va_iova_ptr *aad;
	};

	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error
	 */
	int32_t *status;
};

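/*
 * Illustrative sketch, not part of the DPDK API: a single-operation
 * rte_crypto_sym_vec for an in-place AEAD operation. Every array here
 * has exactly one element; all parameters are assumed to be caller-owned
 * storage and the "__rte_crypto_example_" name is hypothetical.
 */
static inline void
__rte_crypto_example_single_op_vec(struct rte_crypto_sym_vec *vec,
		struct rte_crypto_sgl *sgl,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *digest,
		struct rte_crypto_va_iova_ptr *aad,
		int32_t *status)
{
	vec->num = 1;		/* one operation */
	vec->src_sgl = sgl;	/* input data */
	vec->dest_sgl = NULL;	/* NULL selects in-place processing */
	vec->iv = iv;
	vec->digest = digest;
	vec->aad = aad;		/* AEAD uses the aad union member */
	vec->status = status;	/* one status slot per operation */
}
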
/**
 * Used for cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};

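/*
 * Illustrative sketch, not part of the DPDK API: packing offsets for an
 * encrypt-then-authenticate layout where authentication covers the whole
 * buffer but ciphering skips a cleartext header of hdr_len bytes
 * (hdr_len is a hypothetical parameter).
 */
static inline union rte_crypto_sym_ofs
__rte_crypto_example_make_ofs(uint16_t hdr_len)
{
	union rte_crypto_sym_ofs ofs;

	ofs.raw = 0;			/* clear all head/tail offsets */
	ofs.ofs.cipher.head = hdr_len;	/* cipher starts after the header */
	ofs.ofs.auth.head = 0;		/* auth starts at byte 0 */
	return ofs;
}
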
/** Symmetric Cipher Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_cipher_algorithm {
	RTE_CRYPTO_CIPHER_NULL = 1,
	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */

	RTE_CRYPTO_CIPHER_3DES_CBC,
	/**< Triple DES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_3DES_CTR,
	/**< Triple DES algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_3DES_ECB,
	/**< Triple DES algorithm in ECB mode */

	RTE_CRYPTO_CIPHER_AES_CBC,
	/**< AES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_AES_CTR,
	/**< AES algorithm in Counter mode */
	RTE_CRYPTO_CIPHER_AES_ECB,
	/**< AES algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_AES_F8,
	/**< AES algorithm in F8 mode */
	RTE_CRYPTO_CIPHER_AES_XTS,
	/**< AES algorithm in XTS mode */

	RTE_CRYPTO_CIPHER_ARC4,
	/**< (A)RC4 cipher algorithm */

	RTE_CRYPTO_CIPHER_KASUMI_F8,
	/**< KASUMI algorithm in F8 mode */

	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	/**< SNOW 3G algorithm in UEA2 mode */

	RTE_CRYPTO_CIPHER_ZUC_EEA3,
	/**< ZUC algorithm in EEA3 mode */

	RTE_CRYPTO_CIPHER_DES_CBC,
	/**< DES algorithm in CBC mode */

	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
	/**< AES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_DES_DOCSISBPI
	/**< DES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */
};

/** Cipher algorithm name strings */
extern const char *
rte_crypto_cipher_algorithm_strings[];

/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};

/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];

/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key.
	 *
	 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the
	 * original key data provided may be wrapped (encrypted) using a key
	 * wrap algorithm such as AES key wrap (RFC 3394), and hence the length
	 * of the key may exceed the PMD's advertised supported key size.
	 * The PMD shall validate the key length and report an EMSGSIZE error
	 * while configuring the session; the application can then skip checking
	 * the capability key length in such cases.
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC 3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to the total
	 *    length of the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this. Note that the PMDs may
		 * modify the memory reserved (the first byte and the
		 * nonce value itself).
		 *
		 * - For AES-XTS, this is the 128bit tweak, i, from
		 * IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * length of the IV (which must be the same as the
		 * block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 * of the counter (which must be the same as the block
		 * length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint32_t dataunit_len;
	/**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is enabled,
	 * this is the data-unit length of the algorithm;
	 * otherwise, or when the value is 0, the operation length is used.
	 * The value should be in the range defined by the dataunit_set field
	 * in the cipher capability.
	 *
	 * - For AES-XTS it is the size of the data-unit, from IEEE Std
	 *   1619-2007. For each data-unit in the operation, the tweak (IV)
	 *   value is assigned consecutively starting from the operation's
	 *   assigned IV.
	 */
};

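/*
 * Illustrative sketch, not part of the DPDK API: filling a cipher xform
 * for AES-128-CBC encryption. The key buffer is assumed to be owned by
 * the caller and iv_offset is the caller-chosen location of the 16-byte
 * IV within the rte_crypto_op; the "__rte_crypto_example_" name is
 * hypothetical.
 */
static inline void
__rte_crypto_example_cipher_xform(struct rte_crypto_cipher_xform *cx,
		const uint8_t *key, uint16_t iv_offset)
{
	cx->op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cx->algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cx->key.data = key;
	cx->key.length = 16;		/* AES-128 */
	cx->iv.offset = iv_offset;	/* bytes from start of rte_crypto_op */
	cx->iv.length = 16;		/* must equal the AES block size */
	cx->dataunit_len = 0;		/* 0: use the operation length */
}
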
/** Symmetric Authentication / Hash Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC
	/**< HMAC using 512 bit SHA3 algorithm. */
};

/** Authentication algorithm name strings */
extern const char *
rte_crypto_auth_algorithm_strings[];

/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};

/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];

/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode and for ZUC in EIA3 mode,
		 * this is the authentication Initialisation Vector
		 * (IV) value. For the AES-GMAC IV description please refer
		 * to the field `length` in the iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 * algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
		 * for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 * algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 * 1) A number greater than or equal to one, which means that
		 * the IV is used and J0 will be computed internally; a minimum
		 * of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 * 16 bytes of J0 should be passed, where J0 is defined
		 * by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};

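/*
 * Illustrative sketch, not part of the DPDK API: an HMAC-SHA-1-96 setup,
 * i.e. HMAC using SHA-1 with the digest truncated to 12 bytes as noted
 * in the algorithm list above. The key buffer is assumed to be owned by
 * the caller; the "__rte_crypto_example_" name is hypothetical.
 */
static inline void
__rte_crypto_example_auth_xform(struct rte_crypto_auth_xform *ax,
		const uint8_t *key, uint16_t key_len)
{
	ax->op = RTE_CRYPTO_AUTH_OP_GENERATE;
	ax->algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	ax->key.data = key;
	ax->key.length = key_len;	/* MUST NOT exceed the block size (64) */
	ax->iv.offset = 0;		/* HMAC uses no IV */
	ax->iv.length = 0;
	ax->digest_length = 12;		/* truncated from the full 20 bytes */
}
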
/** Symmetric AEAD Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
	/**< Chacha20 cipher with poly1305 authenticator */
};

/** AEAD algorithm name strings */
extern const char *
rte_crypto_aead_algorithm_strings[];

/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];

struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this.
		 *
		 * - For Chacha20-Poly1305 it is a 96-bit nonce.
		 * The PMD sets the initial counter for the Poly1305 key
		 * generation part to 0 and for Chacha20 encryption to 1,
		 * as per RFC 8439 2.8. "AEAD construction".
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 * 1) A number greater than or equal to one, which means that
		 * the IV is used and J0 will be computed internally; a minimum
		 * of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 * 16 bytes of J0 should be passed, where J0 is defined
		 * by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 *
		 * - For Chacha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * it is required to reserve 18 bytes before the AAD and padding
	 * at the end of it, so a multiple of 16 bytes is allocated.
	 */
};

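/*
 * Illustrative sketch, not part of the DPDK API: an AES-128-GCM encrypt
 * setup using the common 12-byte IV (non-zero length, so J0 is derived
 * internally) and a full 16-byte tag. iv_offset and aad_len are
 * caller-chosen values; the "__rte_crypto_example_" name is hypothetical.
 */
static inline void
__rte_crypto_example_aead_xform(struct rte_crypto_aead_xform *ax,
		const uint8_t *key, uint16_t iv_offset, uint16_t aad_len)
{
	ax->op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	ax->algo = RTE_CRYPTO_AEAD_AES_GCM;
	ax->key.data = key;
	ax->key.length = 16;	/* AES-128 */
	ax->iv.offset = iv_offset;
	ax->iv.length = 12;	/* non-zero: J0 computed internally */
	ax->digest_length = 16;	/* full GCM authentication tag */
	ax->aad_length = aad_len;
}
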
/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform */
};

/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform; the type field is used to specify
 * which transform is contained within the union.
 */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */

	RTE_STD_C11
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};

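/*
 * Illustrative sketch, not part of the DPDK API: chaining two xforms so
 * that ciphering runs first and authentication second (encrypt-then-mac).
 * The xform storage is assumed to be allocated by the caller; the
 * "__rte_crypto_example_" name is hypothetical.
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_example_chain(struct rte_crypto_sym_xform *cipher_xf,
		struct rte_crypto_sym_xform *auth_xf)
{
	cipher_xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf->next = auth_xf;	/* auth follows cipher */
	auth_xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf->next = NULL;		/* end of chain */
	return cipher_xf;		/* head of the chain */
}
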
struct rte_cryptodev_sym_session;

/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
 * which contains the source data on which the crypto operation is to be
 * performed.
 * While the mbuf is in use by a crypto operation no part of the mbuf should be
 * changed by the application as the device may read or write to any part of
 * the mbuf. In the case of hardware crypto devices some or all of the mbuf
 * may be DMAed in and out of the device, so writing over the original data is
 * possible, though only the part specified by the rte_crypto_sym_op for
 * transformation will be changed.
 *
 * Out-of-place (OOP) operation, where the source mbuf is different to the
 * destination mbuf, is a special case. Data will be copied from m_src to
 * m_dst. The part copied includes all the parts of the source mbuf that will
 * be operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data
 * around this indicated by the auth parameters will be copied unchanged from
 * source to destination.
 *
 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * to the data in the source buffer.
 */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	RTE_STD_C11
	union {
		struct rte_cryptodev_sym_session *session;
		/**< Handle for the initialised session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
		struct rte_security_session *sec_session;
		/**< Handle for the initialised security session context */
	};

	RTE_STD_C11
	union {
		struct {
			struct {
				uint32_t offset;
				 /**< Starting point for AEAD processing, specified as
				  * number of bytes from start of packet in source
				  * buffer.
				  */
				uint32_t length;
				 /**< The message length, in bytes, of the source buffer
				  * on which the cryptographic operation will be
				  * computed. This must be a multiple of the block size.
				  */
			} data; /**< Data offsets and length for AEAD */

			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_aead_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest; /**< Digest parameters */

			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM).
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should set up this field as follows:
				 *
				 * - the additional authentication data itself should
				 * be written starting at an offset of 18 bytes into
				 * the array, leaving room for the first block (16 bytes)
				 * and the length encoding in the first two bytes of the
				 * second block.
				 *
				 * - the array should be big enough to hold the above
				 * fields, plus any padding to round this up to the
				 * nearest multiple of the block size (16 bytes).
				 * Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 * (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should set up this field as follows:
				 *
				 * - the AAD is written in starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 * any space to round this up to the nearest multiple
				 * of the block size (16 bytes).
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;

		struct {
			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for cipher processing,
					  * specified as number of bytes from start
					  * of data in the source buffer.
					  * The result of the cipher operation will be
					  * written back into the output buffer
					  * starting at this location.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the
					  * source buffer on which the cryptographic
					  * operation will be computed.
					  * This is also the same as the result length.
					  * This must be a multiple of the block size
					  * or a multiple of the data-unit length
					  * as described in the xform.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
				} data; /**< Data offsets and length for ciphering */
			} cipher;

			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for hash processing,
					  * specified as number of bytes from start of
					  * packet in source buffer.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * this offset should be such that
					  * data to authenticate starts at COUNT.
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * offset is the DOCSIS header length
					  * and, therefore, also the CRC offset
					  * i.e. the number of bytes into the
					  * packet at which CRC calculation
					  * should begin.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the source
					  * buffer that the hash will be computed on.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * the length should include the COUNT,
					  * FRESH, message, direction bit and padding
					  * (to be a multiple of 8 bits).
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * is the CRC length i.e. the number of
					  * bytes in the packet over which the
					  * CRC should be calculated.
					  */
				} data;
				/**< Data offsets and length for authentication */

				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * A digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against an appended,
					 * decrypted digest can be performed
					 * using chained decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - the caller must allocate at least
					 * digest_length of memory at the end of
					 * the source and (in case of out-of-place
					 * operations) destination buffer; those
					 * buffers can be linear or split using
					 * scatter-gather lists,
					 * - the digest data pointer must point to
					 * the end of the source or (in case of
					 * out-of-place operations) destination
					 * data, which is a pointer to the
					 * data buffer + auth.data.offset +
					 * auth.data.length,
					 * - cipher.data.offset +
					 * cipher.data.length must be greater
					 * than auth.data.offset +
					 * auth.data.length and is typically
					 * equal to auth.data.offset +
					 * auth.data.length + digest_length,
					 * - for wireless algorithms, i.e.
					 * SNOW 3G, KASUMI and ZUC, as the
					 * cipher.data.length,
					 * cipher.data.offset,
					 * auth.data.length and
					 * auth.data.offset are in bits, they
					 * must be 8-bit multiples.
					 *
					 * Note that for security reasons, it
					 * is the PMDs' responsibility to not
					 * leave an unencrypted digest in any
					 * buffer after performing auth-cipher
					 * operations.
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest; /**< Digest parameters */
			} auth;
		};
	};
};

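/*
 * Illustrative sketch, not part of the DPDK API: filling the AEAD branch
 * of a symmetric op for in-place processing of an mbuf. The digest and
 * AAD buffers and their IOVAs are assumed to be caller-provided; the
 * "__rte_crypto_example_" name is hypothetical.
 */
static inline void
__rte_crypto_example_fill_aead_op(struct rte_crypto_sym_op *op,
		struct rte_mbuf *m, uint32_t ofs, uint32_t len,
		uint8_t *digest, rte_iova_t digest_iova,
		uint8_t *aad, rte_iova_t aad_iova)
{
	op->m_src = m;
	op->m_dst = NULL;		/* NULL: in-place operation */
	op->aead.data.offset = ofs;	/* bytes from start of packet data */
	op->aead.data.length = len;	/* plaintext/ciphertext length */
	op->aead.digest.data = digest;
	op->aead.digest.phys_addr = digest_iova;
	op->aead.aad.data = aad;
	op->aead.aad.phys_addr = aad_iova;
}
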
/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param	op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}

/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns pointer to first crypto xform in crypto operations
 *   chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}

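/*
 * Illustrative sketch, not part of the DPDK API: a session-less layout
 * where two xforms live directly behind the operation in one allocation.
 * buf is assumed to provide space for at least two xforms; the
 * "__rte_crypto_example_" name is hypothetical.
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_example_alloc_chain(struct rte_crypto_sym_op *sym_op, void *buf)
{
	/* both xforms come back NOT_SPECIFIED and already chained */
	return __rte_crypto_sym_op_sym_xforms_alloc(sym_op, buf, 2);
}
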
/**
 * Attach a session to a symmetric crypto operation.
 *
 * @param	sym_op	crypto operation
 * @param	sess	cryptodev session
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
		struct rte_cryptodev_sym_session *sess)
{
	sym_op->session = sess;

	return 0;
}

/**
 * Converts a portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in the *vec* array.
 * Expects that the provided *ofs* + *len* does not exceed the mbuf's
 * *pkt_len*.
 *
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of an output array.
 * @return
 *   - number of successfully filled entries in the *vec* array.
 *   - negative number of elements in the *vec* array required.
 */
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {

		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);
		/* tot_len must describe this segment, not the first one */
		vec[i].tot_len = nseg->buf_len - rte_pktmbuf_headroom(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			i++;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i;
}

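/*
 * Illustrative sketch, not part of the DPDK API: using the helper above
 * to build an rte_crypto_sgl over a portion of an mbuf, e.g. as the
 * src_sgl entry of a struct rte_crypto_sym_vec. The
 * "__rte_crypto_example_" name is hypothetical.
 */
static inline int
__rte_crypto_example_mbuf_to_sgl(const struct rte_mbuf *m, uint32_t ofs,
		uint32_t len, struct rte_crypto_vec vec[], uint32_t num,
		struct rte_crypto_sgl *sgl)
{
	int n = rte_crypto_mbuf_to_vec(m, ofs, len, vec, num);

	if (n < 0)
		return -1;	/* vec[] too small: -n entries are needed */

	sgl->vec = vec;
	sgl->num = n;
	return 0;
}
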
#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */