diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index bc356f6ff0..deb46971fe 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2019 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 
 #ifndef _RTE_CRYPTO_SYM_H_
@@ -25,6 +25,67 @@ extern "C" {
 #include <rte_mempool.h>
 #include <rte_common.h>
 
+/**
+ * Crypto IO vector (in analogy with struct iovec).
+ * Supposed to be used to pass input/output data buffers to crypto data-path
+ * functions.
+ */
+struct rte_crypto_vec {
+	/** virtual address of the data buffer */
+	void *base;
+	/** IOVA of the data buffer */
+	rte_iova_t iova;
+	/** length of the data buffer */
+	uint32_t len;
+};
+
+/**
+ * Crypto scatter-gather list descriptor. Consists of a pointer to an array
+ * of Crypto IO vectors and the size of that array.
+ */
+struct rte_crypto_sgl {
+	/** start of an array of vectors */
+	struct rte_crypto_vec *vec;
+	/** size of an array of vectors */
+	uint32_t num;
+};
+
+/**
+ * Synchronous operation descriptor.
+ * Supposed to be used with CPU crypto API calls.
+ */
+struct rte_crypto_sym_vec {
+	/** array of SGL vectors */
+	struct rte_crypto_sgl *sgl;
+	/** array of pointers to IV */
+	void **iv;
+	/** array of pointers to AAD */
+	void **aad;
+	/** array of pointers to digest */
+	void **digest;
+	/**
+	 * array of statuses for each operation:
+	 * - 0 on success
+	 * - errno on error
+	 */
+	int32_t *status;
+	/** number of operations to perform */
+	uint32_t num;
+};
+
+/**
+ * Used for cpu_crypto_process_bulk() to specify head/tail offsets
+ * for auth/cipher processing.
+ */
+union rte_crypto_sym_ofs {
+	uint64_t raw;
+	struct {
+		struct {
+			uint16_t head;
+			uint16_t tail;
+		} auth, cipher;
+	} ofs;
+};
 
 /** Symmetric Cipher Algorithms */
 enum rte_crypto_cipher_algorithm {
@@ -348,8 +409,6 @@
 	/**< AES algorithm in CCM mode. */
 	RTE_CRYPTO_AEAD_AES_GCM,
 	/**< AES algorithm in GCM mode. */
-	RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
-	/**< Chacha20 cipher with poly1305 authenticator */
 	RTE_CRYPTO_AEAD_LIST_END
 };
 
@@ -393,11 +452,6 @@
 		 * be allocated, even though the length field will
 		 * have a value less than this.
 		 *
-		 * - For Chacha20-Poly1305 it is 96-bit nonce.
-		 * PMD sets initial counter for Poly1305 key generation
-		 * part to 0 and for Chacha20 encryption to 1 as per
-		 * rfc8439 2.8. AEAD construction.
-		 *
 		 * For optimum performance, the data pointed to SHOULD
 		 * be 8-byte aligned.
 		 */
@@ -414,8 +468,6 @@
 		 *
 		 * - For CCM mode, this is the length of the nonce,
 		 * which can be in the range 7 to 13 inclusive.
-		 *
-		 * - For Chacha20-Poly1305 this field is always 12.
 		 */
 	} iv;	/**< Initialisation vector parameters */
 
@@ -798,6 +850,73 @@ __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
 	return 0;
 }
 
+/**
+ * Converts a portion of mbuf data into a vector representation.
+ * Each segment will be represented as a separate entry in the *vec* array.
+ * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
+ * @param mb
+ *   Pointer to the *rte_mbuf* object.
+ * @param ofs
+ *   Offset within mbuf data to start with.
+ * @param len
+ *   Length of data to represent.
+ * @param vec
+ *   Pointer to an output array of IO vectors.
+ * @param num
+ *   Size of the *vec* array.
+ * @return
+ *   - on success, the number of entries filled in the *vec* array.
+ *   - on failure, the negative of the number of *vec* entries required
+ *     to hold all segments.
+ */
+__rte_experimental
+static inline int
+rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
+	struct rte_crypto_vec vec[], uint32_t num)
+{
+	uint32_t i;
+	struct rte_mbuf *nseg;
+	uint32_t left;
+	uint32_t seglen;
+
+	/* assuming that requested data starts in the first segment */
+	RTE_ASSERT(mb->data_len > ofs);
+
+	if (mb->nb_segs > num)
+		return -mb->nb_segs;
+
+	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
+	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
+
+	/* whole data lies in the first segment */
+	seglen = mb->data_len - ofs;
+	if (len <= seglen) {
+		vec[0].len = len;
+		return 1;
+	}
+
+	/* data spread across segments */
+	vec[0].len = seglen;
+	left = len - seglen;
+	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
+		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
+		vec[i].iova = rte_pktmbuf_iova(nseg);
+
+		seglen = nseg->data_len;
+		if (left <= seglen) {
+			/* whole requested data is completed */
+			vec[i].len = left;
+			left = 0;
+			break;
+		}
+
+		/* use whole segment */
+		vec[i].len = seglen;
+		left -= seglen;
+	}
+
+	RTE_ASSERT(left == 0);
+	return i + 1;
+}
+
 #ifdef __cplusplus
 }
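
For context, here is a minimal usage sketch (not part of the diff) showing how
the pieces added above fit together: the mbuf payload is flattened into an
rte_crypto_vec array with rte_crypto_mbuf_to_vec(), wrapped in an
rte_crypto_sgl and a one-entry rte_crypto_sym_vec, and handed to
rte_cryptodev_sym_cpu_crypto_process(), the synchronous CPU crypto call that
accompanies these structures in rte_cryptodev.h. The MAX_SEGS bound and the
cpu_crypto_one_op() wrapper are illustrative assumptions; session creation,
capability checks and IV/AAD/digest buffer management are elided.

	#include <errno.h>
	#include <rte_common.h>
	#include <rte_mbuf.h>
	#include <rte_cryptodev.h>

	#define MAX_SEGS 8	/* assumed upper bound on mbuf segments */

	/*
	 * Hypothetical helper: run one symmetric operation over the whole
	 * payload of *mb* through the synchronous CPU crypto path.
	 */
	static int
	cpu_crypto_one_op(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
			const struct rte_mbuf *mb, void *iv, void *aad,
			void *digest)
	{
		struct rte_crypto_vec vec[MAX_SEGS];
		struct rte_crypto_sgl sgl;
		struct rte_crypto_sym_vec symvec;
		union rte_crypto_sym_ofs ofs;
		int32_t status;
		int n;

		/* flatten the mbuf chain into a crypto IO vector array */
		n = rte_crypto_mbuf_to_vec(mb, 0, mb->pkt_len, vec,
				RTE_DIM(vec));
		if (n < 0)
			return -ENOSPC;	/* -n entries would be needed */

		sgl.vec = vec;
		sgl.num = n;

		/* single operation, so every per-op array has one entry */
		symvec.sgl = &sgl;
		symvec.iv = &iv;
		symvec.aad = &aad;
		symvec.digest = &digest;
		symvec.status = &status;
		symvec.num = 1;

		/* process the full buffer: no head/tail bytes skipped */
		ofs.raw = 0;

		if (rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs,
				&symvec) != 1)
			return -EINVAL;

		return status;	/* 0 on success, errno-style value on error */
	}

The sketch follows the rte_crypto_sym_vec layout exactly as it appears in this
diff; later DPDK releases reworked the IV/AAD/digest fields, so the member
types may differ in other versions of the header.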