[dpdk.git] / lib / librte_cryptodev / rte_crypto_sym.h
index bc8da24..f29c980 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2019 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 
 #ifndef _RTE_CRYPTO_SYM_H_
@@ -25,6 +25,67 @@ extern "C" {
 #include <rte_mempool.h>
 #include <rte_common.h>
 
+/**
+ * Crypto IO Vector (in analogy with struct iovec)
+ * Supposed to be used to pass input/output data buffers for crypto data-path
+ * functions.
+ */
+struct rte_crypto_vec {
+       /** virtual address of the data buffer */
+       void *base;
+       /** IOVA of the data buffer */
+       rte_iova_t iova;
+       /** length of the data buffer */
+       uint32_t len;
+};
+
+/**
+ * Crypto scatter-gather list descriptor. Consists of a pointer to an array
+ * of Crypto IO vectors and the number of vectors in that array.
+ */
+struct rte_crypto_sgl {
+       /** start of an array of vectors */
+       struct rte_crypto_vec *vec;
+       /** size of an array of vectors */
+       uint32_t num;
+};
+
+/**
+ * Synchronous operation descriptor.
+ * Supposed to be used with the CPU crypto API call.
+ */
+struct rte_crypto_sym_vec {
+       /** array of SGL vectors */
+       struct rte_crypto_sgl *sgl;
+       /** array of pointers to IV */
+       void **iv;
+       /** array of pointers to AAD */
+       void **aad;
+       /** array of pointers to digest */
+       void **digest;
+       /**
+        * array of statuses for each operation:
+        *  - 0 on success
+        *  - errno on error
+        */
+       int32_t *status;
+       /** number of operations to perform */
+       uint32_t num;
+};
+
+/**
+ * Used for cpu_crypto_process_bulk() to specify head/tail offsets
+ * for auth/cipher processing.
+ */
+union rte_crypto_sym_ofs {
+       uint64_t raw;
+       struct {
+               struct {
+                       uint16_t head;
+                       uint16_t tail;
+               } auth, cipher;
+       } ofs;
+};
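The structures above feed the synchronous CPU crypto path. As a minimal sketch (not part of this patch), the following shows one way a caller might assemble a single-operation rte_crypto_sym_vec together with a zeroed rte_crypto_sym_ofs; it assumes the rte_cryptodev_sym_cpu_crypto_process() call declared in rte_cryptodev.h, and the data/IV/AAD/digest buffers are caller-provided.

#include <rte_cryptodev.h>

/* Hypothetical helper: run one synchronous operation over the given
 * scatter-gather data. All pointers are caller-provided; ofs.raw = 0
 * means no head/tail bytes are excluded from cipher/auth processing.
 */
static uint32_t
cpu_crypto_one_op(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_vec *data, uint32_t data_num,
		void *iv, void *aad, void *digest)
{
	struct rte_crypto_sgl sgl = { .vec = data, .num = data_num };
	int32_t status = 0;
	struct rte_crypto_sym_vec vec = {
		.sgl = &sgl,
		.iv = &iv,
		.aad = &aad,
		.digest = &digest,
		.status = &status,
		.num = 1,
	};
	union rte_crypto_sym_ofs ofs = { .raw = 0 };

	/* returns the number of operations completed successfully (0 or 1) */
	return rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &vec);
}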
 
 /** Symmetric Cipher Algorithms */
 enum rte_crypto_cipher_algorithm {
@@ -208,9 +269,12 @@ enum rte_crypto_auth_algorithm {
        /**< HMAC using MD5 algorithm */
 
        RTE_CRYPTO_AUTH_SHA1,
-       /**< 128 bit SHA algorithm. */
+       /**< 160 bit SHA algorithm. */
        RTE_CRYPTO_AUTH_SHA1_HMAC,
-       /**< HMAC using 128 bit SHA algorithm. */
+       /**< HMAC using 160 bit SHA algorithm.
+        * HMAC-SHA-1-96 can be generated by setting
+        * digest_length to 12 bytes in auth/aead xforms.
+        */
        RTE_CRYPTO_AUTH_SHA224,
        /**< 224 bit SHA algorithm. */
        RTE_CRYPTO_AUTH_SHA224_HMAC,
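To illustrate the HMAC-SHA-1-96 note on RTE_CRYPTO_AUTH_SHA1_HMAC above: the truncated tag is requested purely through digest_length. A hypothetical transform setup, assuming key material and session creation are handled elsewhere:

/* Hypothetical HMAC-SHA-1-96 auth transform: regular SHA-1 HMAC with the
 * digest truncated to 12 bytes (96 bits) via digest_length.
 */
static struct rte_crypto_sym_xform
sha1_hmac96_xform(const uint8_t *key, uint16_t key_len)
{
	struct rte_crypto_sym_xform xf = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = key, .length = key_len },
			.digest_length = 12,	/* 96-bit truncated tag */
		},
	};
	return xf;
}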
@@ -348,6 +412,8 @@ enum rte_crypto_aead_algorithm {
        /**< AES algorithm in CCM mode. */
        RTE_CRYPTO_AEAD_AES_GCM,
        /**< AES algorithm in GCM mode. */
+       RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+       /**< Chacha20 cipher with poly1305 authenticator */
        RTE_CRYPTO_AEAD_LIST_END
 };
 
@@ -391,6 +457,11 @@ struct rte_crypto_aead_xform {
                 * be allocated, even though the length field will
                 * have a value less than this.
                 *
+                * - For Chacha20-Poly1305 it is a 96-bit nonce.
+                * The PMD sets the initial counter to 0 for the
+                * Poly1305 key generation part and to 1 for Chacha20
+                * encryption, as per RFC 8439 section 2.8
+                * (AEAD construction).
+                *
                 * For optimum performance, the data pointed to SHOULD
                 * be 8-byte aligned.
                 */
@@ -407,6 +478,8 @@ struct rte_crypto_aead_xform {
                 *
                 * - For CCM mode, this is the length of the nonce,
                 * which can be in the range 7 to 13 inclusive.
+                *
+                * - For Chacha20-Poly1305 this field is always 12.
                 */
        } iv;   /**< Initialisation vector parameters */
 
@@ -590,7 +663,9 @@ struct rte_crypto_sym_op {
                                          * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
                                          * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
                                          * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
-                                         * this field should be in bits.
+                                         * this field should be in bits. For
+                                         * digest-encrypted cases this must be
+                                         * an 8-bit multiple.
                                          */
                                        uint32_t length;
                                         /**< The message length, in bytes, of the
@@ -604,7 +679,9 @@ struct rte_crypto_sym_op {
                                          * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2,
                                          * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
                                          * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
-                                         * this field should be in bits.
+                                         * this field should be in bits. For
+                                         * digest-encrypted cases this must be
+                                         * an 8-bit multiple.
                                          */
                                } data; /**< Data offsets and length for ciphering */
                        } cipher;
@@ -620,12 +697,22 @@ struct rte_crypto_sym_op {
                                          * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
                                          * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
                                          * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
-                                         * this field should be in bits.
+                                         * this field should be in bits. For
+                                         * digest-encrypted cases this must be
+                                         * an 8-bit multiple.
                                          *
                                          * @note
                                          * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
                                          * this offset should be such that
                                          * data to authenticate starts at COUNT.
+                                         *
+                                         * @note
+                                         * For DOCSIS security protocol, this
+                                         * offset is the DOCSIS header length
+                                         * and, therefore, also the CRC offset
+                                         * i.e. the number of bytes into the
+                                         * packet at which CRC calculation
+                                         * should begin.
                                          */
                                        uint32_t length;
                                         /**< The message length, in bytes, of the source
@@ -635,13 +722,21 @@ struct rte_crypto_sym_op {
                                          * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
                                          * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
                                          * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
-                                         * this field should be in bits.
+                                         * this field should be in bits. For
+                                         * digest-encrypted cases this must be
+                                         * an 8-bit multiple.
                                          *
                                          * @note
                                          * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
                                          * the length should include the COUNT,
                                          * FRESH, message, direction bit and padding
                                          * (to be multiple of 8 bits).
+                                         *
+                                         * @note
+                                         * For DOCSIS security protocol, this
+                                         * is the CRC length i.e. the number of
+                                         * bytes in the packet over which the
+                                         * CRC should be calculated.
                                          */
                                } data;
                                /**< Data offsets and length for authentication */
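The bit-granular offsets described in the cipher and auth hunks above are easy to get wrong, so here is a short, hypothetical sketch for a SNOW 3G style operation; the header/payload split is an assumption of the example, and all four fields are expressed in bits:

/* Hypothetical offsets for a SNOW 3G cipher + auth operation: these
 * fields are in bits for the wireless algorithms and must be 8-bit
 * multiples for digest-encrypted cases.
 */
static void
set_snow3g_data(struct rte_crypto_sym_op *sym, uint32_t hdr_bytes,
		uint32_t payload_bytes)
{
	sym->cipher.data.offset = hdr_bytes * 8;	/* bits */
	sym->cipher.data.length = payload_bytes * 8;	/* bits */
	sym->auth.data.offset = 0;			/* bits */
	sym->auth.data.length = (hdr_bytes + payload_bytes) * 8;
}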
@@ -703,6 +798,13 @@ struct rte_crypto_sym_op {
                                         * auth.data.length and is typically
                                         * equal to auth.data.offset +
                                         * auth.data.length + digest_length.
+                                        * - for wireless algorithms, i.e.
+                                        * SNOW 3G, KASUMI and ZUC, as the
+                                        * cipher.data.length,
+                                        * cipher.data.offset,
+                                        * auth.data.length and
+                                        * auth.data.offset are in bits, they
+                                        * must be 8-bit multiples.
                                         *
                                         * Note, that for security reasons, it
                                         * is PMDs' responsibility to not
@@ -774,6 +876,75 @@ __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
        return 0;
 }
 
+/**
+ * Converts a portion of mbuf data into a vector representation.
+ * Each segment will be represented as a separate entry in the *vec* array.
+ * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
+ * @param mb
+ *   Pointer to the *rte_mbuf* object.
+ * @param ofs
+ *   Offset within mbuf data to start with.
+ * @param len
+ *   Length of data to represent.
+ * @param vec
+ *   Pointer to an output array of IO vectors.
+ * @param num
+ *   Size of an output array.
+ * @return
+ *   - number of successfully filled entries in the *vec* array.
+ *   - negative number of entries required in the *vec* array (returned
+ *     when *num* is too small to hold all mbuf segments).
+ */
+__rte_experimental
+static inline int
+rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
+       struct rte_crypto_vec vec[], uint32_t num)
+{
+       uint32_t i;
+       struct rte_mbuf *nseg;
+       uint32_t left;
+       uint32_t seglen;
+
+       /* assuming that requested data starts in the first segment */
+       RTE_ASSERT(mb->data_len > ofs);
+
+       if (mb->nb_segs > num)
+               return -mb->nb_segs;
+
+       vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
+       vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
+
+       /* whole data lies in the first segment */
+       seglen = mb->data_len - ofs;
+       if (len <= seglen) {
+               vec[0].len = len;
+               return 1;
+       }
+
+       /* data spread across segments */
+       vec[0].len = seglen;
+       left = len - seglen;
+       for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
+
+               vec[i].base = rte_pktmbuf_mtod(nseg, void *);
+               vec[i].iova = rte_pktmbuf_iova(nseg);
+
+               seglen = nseg->data_len;
+               if (left <= seglen) {
+                       /* whole requested data is completed */
+                       vec[i].len = left;
+                       left = 0;
+                       break;
+               }
+
+               /* use whole segment */
+               vec[i].len = seglen;
+               left -= seglen;
+       }
+
+       RTE_ASSERT(left == 0);
+       return i + 1;
+}
+
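A minimal usage sketch for the helper above; gather_payload(), hdr_len and the caller-provided vector array are assumptions of the example:

/* Hypothetical caller of rte_crypto_mbuf_to_vec(): describe the packet
 * payload (everything after hdr_len bytes) as a scatter-gather list.
 */
static int
gather_payload(const struct rte_mbuf *mb, uint32_t hdr_len,
		struct rte_crypto_vec vec[], uint32_t num,
		struct rte_crypto_sgl *sgl)
{
	int n;

	n = rte_crypto_mbuf_to_vec(mb, hdr_len,
			rte_pktmbuf_pkt_len(mb) - hdr_len, vec, num);
	if (n < 0)
		return n;	/* -n entries would have been needed */

	sgl->vec = vec;
	sgl->num = n;
	return 0;
}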
 
 #ifdef __cplusplus
 }