/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2019 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
*/
#ifndef _RTE_CRYPTO_SYM_H_
#include <rte_mempool.h>
#include <rte_common.h>
+/**
+ * Crypto IO Vector (in analogy with struct iovec)
+ * Intended to be used to pass input/output data buffers for crypto data-path
+ * functions.
+ */
+struct rte_crypto_vec {
+ /** virtual address of the data buffer */
+ void *base;
+ /** IOVA of the data buffer */
+ rte_iova_t iova;
+ /** length of the data buffer */
+ uint32_t len;
+};
+
+/**
+ * Crypto scatter-gather list descriptor. Consists of a pointer to an array
+ * of Crypto IO vectors and the size of that array.
+ */
+struct rte_crypto_sgl {
+ /** start of an array of vectors */
+ struct rte_crypto_vec *vec;
+ /** size of an array of vectors */
+ uint32_t num;
+};
+
+/**
+ * Synchronous operation descriptor.
+ * Intended to be used with the CPU crypto API.
+ */
+struct rte_crypto_sym_vec {
+ /** array of SGL vectors */
+ struct rte_crypto_sgl *sgl;
+ /** array of pointers to IV */
+ void **iv;
+ /** array of pointers to AAD */
+ void **aad;
+ /** array of pointers to digest */
+ void **digest;
+ /**
+ * array of statuses for each operation:
+ * - 0 on success
+ * - errno on error
+ */
+ int32_t *status;
+ /** number of operations to perform */
+ uint32_t num;
+};
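+
+/*
+ * A minimal usage sketch (illustrative only): describing a single in-place
+ * operation over one linear buffer. The buf, buf_iova, buf_len, iv_buf,
+ * aad_buf and digest_buf variables are hypothetical application-owned
+ * buffers, not part of this API.
+ *
+ * struct rte_crypto_vec v = {
+ *	.base = buf, .iova = buf_iova, .len = buf_len,
+ * };
+ * struct rte_crypto_sgl sgl = { .vec = &v, .num = 1 };
+ * void *iv = iv_buf, *aad = aad_buf, *digest = digest_buf;
+ * int32_t status;
+ * struct rte_crypto_sym_vec symvec = {
+ *	.sgl = &sgl, .iv = &iv, .aad = &aad, .digest = &digest,
+ *	.status = &status, .num = 1,
+ * };
+ */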
+
+/**
+ * Used for cpu_crypto_process_bulk() to specify head/tail offsets
+ * for auth/cipher processing.
+ */
+union rte_crypto_sym_ofs {
+ uint64_t raw;
+ struct {
+ struct {
+ uint16_t head;
+ uint16_t tail;
+ } auth, cipher;
+ } ofs;
+};
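+
+/*
+ * A minimal sketch (illustrative only, assuming head/tail denote the number
+ * of bytes skipped at the start/end of the data): auth covers the whole
+ * data, while cipher skips a 16-byte header and a 4-byte tail. Clearing
+ * .raw first zeroes all four offsets at once.
+ *
+ * union rte_crypto_sym_ofs ofs = { .raw = 0 };
+ * ofs.ofs.cipher.head = 16;
+ * ofs.ofs.cipher.tail = 4;
+ */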
/** Symmetric Cipher Algorithms */
enum rte_crypto_cipher_algorithm {
/**< Cipher algorithm */
struct {
- uint8_t *data; /**< pointer to key data */
- uint16_t length;/**< key length in bytes */
+ const uint8_t *data; /**< pointer to key data */
+ uint16_t length; /**< key length in bytes */
} key;
/**< Cipher key
*
/**< Authentication algorithm selection */
struct {
- uint8_t *data; /**< pointer to key data */
- uint16_t length;/**< key length in bytes */
+ const uint8_t *data; /**< pointer to key data */
+ uint16_t length; /**< key length in bytes */
} key;
/**< Authentication key data.
* The authentication key length MUST be less than or equal to the
/**< AEAD algorithm selection */
struct {
- uint8_t *data; /**< pointer to key data */
- uint16_t length;/**< key length in bytes */
+ const uint8_t *data; /**< pointer to key data */
+ uint16_t length; /**< key length in bytes */
} key;
struct {
* For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
* KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
* and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
- * this field should be in bits.
+ * this field should be in bits. For
+ * digest-encrypted cases this must be
+ * an 8-bit multiple.
*/
uint32_t length;
/**< The message length, in bytes, of the
* For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2,
* KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
* and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
- * this field should be in bits.
+ * this field should be in bits. For
+ * digest-encrypted cases this must be
+ * an 8-bit multiple.
*/
} data; /**< Data offsets and length for ciphering */
} cipher;
* For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
* KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
* and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
- * this field should be in bits.
+ * this field should be in bits. For
+ * digest-encrypted cases this must be
+ * an 8-bit multiple.
*
* @note
* For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
* For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
* KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
* and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
- * this field should be in bits.
+ * this field should be in bits. For
+ * digest-encrypted cases this must be
+ * an 8-bit multiple.
*
* @note
* For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
* For digest generation, the digest result
* will overwrite any data at this location.
*
+ * @note
+ * Digest-encrypted case.
+ * Digest can be generated, appended to
+ * the end of raw data and encrypted
+ * together using chained digest
+ * generation
+ * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
+ * and encryption
+ * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ * xforms. Similarly, authentication
+ * of the raw data against the
+ * appended, decrypted digest can be
+ * performed using decryption
+ * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
+ * and digest verification
+ * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
+ * chained xforms.
+ * To perform those operations, a few
+ * additional conditions must be met:
+ * - caller must allocate at least
+ * digest_length of memory at the end of
+ * source and (in case of out-of-place
+ * operations) destination buffer; those
+ * buffers can be linear or split using
+ * scatter-gather lists,
+ * - digest data pointer must point to
+ * the end of source or (in case of
+ * out-of-place operations) destination
+ * data, i.e. the data buffer pointer
+ * + auth.data.offset +
+ * auth.data.length,
+ * - cipher.data.offset +
+ * cipher.data.length must be greater
+ * than auth.data.offset +
+ * auth.data.length and is typically
+ * equal to auth.data.offset +
+ * auth.data.length + digest_length.
+ * - for wireless algorithms, i.e.
+ * SNOW 3G, KASUMI and ZUC, as the
+ * cipher.data.length,
+ * cipher.data.offset,
+ * auth.data.length and
+ * auth.data.offset are in bits, they
+ * must be 8-bit multiples.
+ *
+ * Note that, for security reasons, it
+ * is the PMD's responsibility not to
+ * leave an unencrypted digest in any
+ * buffer after performing auth-cipher
+ * operations.
+ *
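+ * A hedged sketch of offsets
+ * satisfying the conditions above,
+ * where *sop* points to a
+ * *rte_crypto_sym_op*, *m* to the
+ * source mbuf, and the hypothetical
+ * plen/dlen hold the payload and
+ * digest lengths:
+ *
+ * @code
+ * sop->auth.data.offset = 0;
+ * sop->auth.data.length = plen;
+ * sop->cipher.data.offset = 0;
+ * sop->cipher.data.length = plen + dlen;
+ * sop->auth.digest.data =
+ *	rte_pktmbuf_mtod_offset(m, uint8_t *, plen);
+ * @endcode
+ *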
*/
rte_iova_t phys_addr;
/**< Physical address of digest */
return 0;
}
+/**
+ * Converts portion of mbuf data into a vector representation.
+ * Each segment will be represented as a separate entry in *vec* array.
+ * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
+ * @param mb
+ * Pointer to the *rte_mbuf* object.
+ * @param ofs
+ * Offset within mbuf data to start with.
+ * @param len
+ * Length of data to represent.
+ * @param vec
+ * Pointer to an output array of IO vectors.
+ * @param num
+ * Size of an output array.
+ * @return
+ * - number of successfully filled entries in *vec* array.
+ * - negative of the number of *vec* entries required, if *num* is too
+ * small to hold all mbuf segments.
+ */
+__rte_experimental
+static inline int
+rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
+ struct rte_crypto_vec vec[], uint32_t num)
+{
+ uint32_t i;
+ struct rte_mbuf *nseg;
+ uint32_t left;
+ uint32_t seglen;
+
+ /* assuming that requested data starts in the first segment */
+ RTE_ASSERT(mb->data_len > ofs);
+
+ if (mb->nb_segs > num)
+ return -mb->nb_segs;
+
+ vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
+ vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
+
+ /* whole data lies in the first segment */
+ seglen = mb->data_len - ofs;
+ if (len <= seglen) {
+ vec[0].len = len;
+ return 1;
+ }
+
+ /* data spread across segments */
+ vec[0].len = seglen;
+ left = len - seglen;
+ for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
+ vec[i].base = rte_pktmbuf_mtod(nseg, void *);
+ vec[i].iova = rte_pktmbuf_iova(nseg);
+
+ seglen = nseg->data_len;
+ if (left <= seglen) {
+ /* whole requested data is completed */
+ vec[i].len = left;
+ left = 0;
+ break;
+ }
+
+ /* use whole segment */
+ vec[i].len = seglen;
+ left -= seglen;
+ }
+
+ RTE_ASSERT(left == 0);
+ return i + 1;
+}
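+
+/*
+ * A usage sketch (illustrative only): gathering the first pkt_len bytes of
+ * a hypothetical mbuf *m* into a fixed-size vector array. A negative return
+ * value is the negated number of entries that would be required.
+ *
+ * struct rte_crypto_vec vec[8];
+ * int32_t n;
+ *
+ * n = rte_crypto_mbuf_to_vec(m, 0, m->pkt_len, vec, RTE_DIM(vec));
+ * if (n < 0)
+ *	return n;
+ */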
+
#ifdef __cplusplus
}