crypto/armv8: add PMD optimized for ARMv8 processors
Author:    Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>
Date:      Wed, 18 Jan 2017 20:01:54 +0000 (21:01 +0100)
Committer: Thomas Monjalon <thomas.monjalon@6wind.com>
Date:      Thu, 19 Jan 2017 00:00:55 +0000 (01:00 +0100)
This patch introduces a crypto poll mode driver
using the ARMv8 cryptographic extensions.
CPU compatibility with this driver is detected at
run time; the virtual crypto device will not be
created if the CPU does not provide:
AES, SHA1, SHA2 and NEON.
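
For illustration, a minimal sketch of that run-time check (the wrapper
name is hypothetical; the rte_cpu_get_flag_enabled() calls are the ones
used by the probe code in this patch):

    #include <rte_cpuflags.h>

    /* Return non-zero when every ISA extension this PMD needs is present. */
    static int
    armv8_crypto_cpu_supported(void)
    {
            return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES) &&
                   rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA1) &&
                   rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA2) &&
                   rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON);
    }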

This PMD is optimized to boost performance of
chained crypto operations, such as
encryption + HMAC generation or
decryption + HMAC validation. Consequently,
cipher-only and hash-only operations are
not provided.
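
As a sketch of what a chained operation looks like from the application
side (a hedged example: field names follow this release's
rte_crypto_sym_xform, and the key buffers are placeholders):

    /* AES-128-CBC encryption followed by SHA1 HMAC generation. */
    static uint8_t cipher_key[16];
    static uint8_t hmac_key[20];

    struct rte_crypto_sym_xform auth_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_AUTH,
            .next = NULL,
            .auth = {
                    .op = RTE_CRYPTO_AUTH_OP_GENERATE,
                    .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
                    .key = { .data = hmac_key, .length = sizeof(hmac_key) },
                    .digest_length = 20,
            },
    };
    struct rte_crypto_sym_xform cipher_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
            .next = &auth_xform,    /* cipher first, then auth */
            .cipher = {
                    .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
                    .algo = RTE_CRYPTO_CIPHER_AES_CBC,
                    .key = { .data = cipher_key,
                             .length = sizeof(cipher_key) },
            },
    };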

The driver currently supports AES-128-CBC
in combination with SHA1 HMAC or SHA256 HMAC,
and relies on the external armv8_crypto library:
https://github.com/caviumnetworks/armv8_crypto
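
The combined-mode entry points bound from that library all share one
signature (the crypto_func_t typedef in rte_armv8_pmd_private.h;
crypto_arg_t comes from the library's armv8_crypto_defs.h, and the
parameter names below merely mirror the call site in rte_armv8_pmd.c):

    int aes128cbc_sha1_hmac(uint8_t *csrc, uint8_t *cdst, uint64_t clen,
                            uint8_t *asrc, uint8_t *adst, uint64_t alen,
                            crypto_arg_t *arg);

    /* Likewise: aes128cbc_sha256_hmac, sha1_hmac_aes128cbc_dec,
     * sha256_hmac_aes128cbc_dec. */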

The ARMv8 crypto PMD is built when compiling for
ARM64 with the CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO
option enabled in the configuration file.
The ARMV8_CRYPTO_LIB_PATH environment variable
must point to the appropriate library directory.

Signed-off-by: Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>
Reviewed-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
MAINTAINERS
config/common_base
devtools/test-build.sh
drivers/crypto/Makefile
drivers/crypto/armv8/Makefile [new file with mode: 0644]
drivers/crypto/armv8/rte_armv8_pmd.c [new file with mode: 0644]
drivers/crypto/armv8/rte_armv8_pmd_ops.c [new file with mode: 0644]
drivers/crypto/armv8/rte_armv8_pmd_private.h [new file with mode: 0644]
drivers/crypto/armv8/rte_armv8_pmd_version.map [new file with mode: 0644]
lib/librte_cryptodev/rte_cryptodev.h
mk/rte.app.mk

diff --git a/MAINTAINERS b/MAINTAINERS
index 184c1ea..abe42df 100644
@@ -421,6 +421,11 @@ Crypto Drivers
 M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 
+ARMv8 Crypto PMD
+M: Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+F: drivers/crypto/armv8/
+
 Intel AES-NI GCM PMD
 M: Declan Doherty <declan.doherty@intel.com>
 F: drivers/crypto/aesni_gcm/
diff --git a/config/common_base b/config/common_base
index a786d5f..b9fb8e2 100644
@@ -377,6 +377,12 @@ CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
 CONFIG_RTE_CRYPTO_MAX_DEVS=64
 CONFIG_RTE_CRYPTODEV_NAME_LEN=64
 
+#
+# Compile PMD for ARMv8 Crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n
+CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n
+
 #
 # Compile PMD for QuickAssist based devices
 #
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 4f625e2..680d79b 100755
@@ -34,6 +34,7 @@ default_path=$PATH
 
 # Load config options:
 # - AESNI_MULTI_BUFFER_LIB_PATH
+# - ARMV8_CRYPTO_LIB_PATH
 # - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2)
 # - DPDK_DEP_ARCHIVE
 # - DPDK_DEP_CFLAGS
@@ -129,6 +130,7 @@ reset_env ()
        unset DPDK_DEP_SZE
        unset DPDK_DEP_ZLIB
        unset AESNI_MULTI_BUFFER_LIB_PATH
+       unset ARMV8_CRYPTO_LIB_PATH
        unset LIBSSO_SNOW3G_PATH
        unset LIBSSO_KASUMI_PATH
        unset LIBSSO_ZUC_PATH
@@ -176,6 +178,8 @@ config () # <directory> <target> <options>
                sed -ri            's,(NFP_PMD=)n,\1y,' $1/.config
                test "$DPDK_DEP_PCAP" != y || \
                sed -ri               's,(PCAP=)n,\1y,' $1/.config
+               test -z "$ARMV8_CRYPTO_LIB_PATH" || \
+               sed -ri   's,(PMD_ARMV8_CRYPTO=)n,\1y,' $1/.config
                test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
                sed -ri       's,(PMD_AESNI_MB=)n,\1y,' $1/.config
                test "$DPDK_DEP_ISAL_CRYPTO" != y || \
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 745c614..77b02cf 100644
@@ -33,6 +33,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
 
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
diff --git a/drivers/crypto/armv8/Makefile b/drivers/crypto/armv8/Makefile
new file mode 100644
index 0000000..2003ec4
--- /dev/null
+++ b/drivers/crypto/armv8/Makefile
@@ -0,0 +1,72 @@
+#
+#   BSD LICENSE
+#
+#   Copyright (C) Cavium networks Ltd. 2017.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Cavium networks nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(ARMV8_CRYPTO_LIB_PATH),)
+$(error "Please define ARMV8_CRYPTO_LIB_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_armv8.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_armv8_pmd_version.map
+
+# external library dependencies
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)/asm/include
+LDLIBS += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
new file mode 100644
index 0000000..1bf0f9d
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -0,0 +1,900 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2017.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static int cryptodev_armv8_crypto_uninit(const char *name);
+
+/**
+ * Pointers to the supported combined mode crypto functions are stored
+ * in the static tables. Each combined (chained) cryptographic operation
+ * can be described by a set of numbers:
+ * - order:    order of operations (cipher, auth) or (auth, cipher)
+ * - direction:        encryption or decryption
+ * - calg:     cipher algorithm such as AES_CBC, AES_CTR, etc.
+ * - aalg:     authentication algorithm such as SHA1, SHA256, etc.
+ * - keyl:     cipher key length, for example 128, 192, 256 bits
+ *
+ * In order to quickly acquire each function pointer based on those numbers,
+ * a hierarchy of arrays is maintained. The final level, a 3D array, is
+ * indexed by the combined mode function parameters only (cipher algorithm,
+ * authentication algorithm and key length).
+ *
+ * This gives 3 memory accesses to obtain a function pointer, instead of
+ * traversing the array manually and comparing function parameters on each
+ * iteration.
+ *
+ *                   +--+CRYPTO_FUNC
+ *            +--+ENC|
+ *      +--+CA|
+ *      |     +--+DEC
+ * ORDER|
+ *      |     +--+ENC
+ *      +--+AC|
+ *            +--+DEC
+ *
+ */
+
+/**
+ * 3D array type for ARM Combined Mode crypto functions pointers.
+ * CRYPTO_CIPHER_MAX:                  max cipher ID number
+ * CRYPTO_AUTH_MAX:                    max auth ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX:           max key length ID number
+ */
+typedef const crypto_func_t
+crypto_func_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_AUTH_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+/* Evaluate to key length definition */
+#define KEYL(keyl)             (ARMV8_CRYPTO_CIPHER_KEYLEN_ ## keyl)
+
+/* Local aliases for supported ciphers */
+#define CIPH_AES_CBC           RTE_CRYPTO_CIPHER_AES_CBC
+/* Local aliases for supported hashes */
+#define AUTH_SHA1_HMAC         RTE_CRYPTO_AUTH_SHA1_HMAC
+#define AUTH_SHA256_HMAC       RTE_CRYPTO_AUTH_SHA256_HMAC
+
+/**
+ * Arrays containing pointers to particular combined mode
+ * cryptographic functions.
+ * crypto_op_ca_encrypt:       cipher (encrypt), authenticate
+ * crypto_op_ca_decrypt:       cipher (decrypt), authenticate
+ * crypto_op_ac_encrypt:       authenticate, cipher (encrypt)
+ * crypto_op_ac_decrypt:       authenticate, cipher (decrypt)
+ */
+static const crypto_func_tbl_t
+crypto_op_ca_encrypt = {
+       /* [cipher alg][auth alg][key length] = crypto_function, */
+       [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = aes128cbc_sha1_hmac,
+       [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = aes128cbc_sha256_hmac,
+};
+
+static const crypto_func_tbl_t
+crypto_op_ca_decrypt = {
+       NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_encrypt = {
+       NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_decrypt = {
+       /* [cipher alg][auth alg][key length] = crypto_function, */
+       [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = sha1_hmac_aes128cbc_dec,
+       [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = sha256_hmac_aes128cbc_dec,
+};
+
+/**
+ * Arrays containing pointers to particular cryptographic function sets,
+ * covering given cipher operation directions (encrypt, decrypt)
+ * for each order of cipher and authentication pairs.
+ */
+static const crypto_func_tbl_t *
+crypto_cipher_auth[] = {
+       &crypto_op_ca_encrypt,
+       &crypto_op_ca_decrypt,
+       NULL
+};
+
+static const crypto_func_tbl_t *
+crypto_auth_cipher[] = {
+       &crypto_op_ac_encrypt,
+       &crypto_op_ac_decrypt,
+       NULL
+};
+
+/**
+ * Top level array containing pointers to particular cryptographic
+ * function sets, covering given order of chained operations.
+ * crypto_cipher_auth: cipher first, authenticate after
+ * crypto_auth_cipher: authenticate first, cipher after
+ */
+static const crypto_func_tbl_t **
+crypto_chain_order[] = {
+       crypto_cipher_auth,
+       crypto_auth_cipher,
+       NULL
+};
+
+/**
+ * Extract particular combined mode crypto function from the 3D array.
+ */
+#define CRYPTO_GET_ALGO(order, cop, calg, aalg, keyl)                  \
+({                                                                     \
+       crypto_func_tbl_t *func_tbl =                                   \
+                               (crypto_chain_order[(order)])[(cop)];   \
+                                                                       \
+       ((*func_tbl)[(calg)][(aalg)][KEYL(keyl)]);              \
+})
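
An illustrative usage sketch: a session for AES-128-CBC encryption
chained with SHA1 HMAC in cipher-then-auth order resolves its function
pointer as

    crypto_func_t f = CRYPTO_GET_ALGO(ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
                                      ARMV8_CRYPTO_CIPHER_OP_ENCRYPT,
                                      RTE_CRYPTO_CIPHER_AES_CBC,
                                      RTE_CRYPTO_AUTH_SHA1_HMAC, 128);

which, given the tables above, yields aes128cbc_sha1_hmac.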
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * 2D array type for ARM key schedule functions pointers.
+ * CRYPTO_CIPHER_MAX:                  max cipher ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX:           max key length ID number
+ */
+typedef const crypto_key_sched_t
+crypto_key_sched_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_encrypt = {
+       /* [cipher alg][key length] = key_expand_func, */
+       [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_enc,
+};
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_decrypt = {
+       /* [cipher alg][key length] = key_expand_func, */
+       [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_dec,
+};
+
+/**
+ * Top level array containing pointers to particular key generation
+ * function sets, covering given operation direction.
+ * crypto_key_sched_encrypt:   keys for encryption
+ * crypto_key_sched_decrypt:   keys for decryption
+ */
+static const crypto_key_sched_tbl_t *
+crypto_key_sched_dir[] = {
+       &crypto_key_sched_encrypt,
+       &crypto_key_sched_decrypt,
+       NULL
+};
+
+/**
+ * Extract particular key schedule function from the 2D array.
+ */
+#define CRYPTO_GET_KEY_SCHED(cop, calg, keyl)                          \
+({                                                                     \
+       crypto_key_sched_tbl_t *ks_tbl = crypto_key_sched_dir[(cop)];   \
+                                                                       \
+       ((*ks_tbl)[(calg)][KEYL(keyl)]);                                \
+})
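
Similarly, an illustrative sketch for the key schedule lookup:

    crypto_key_sched_t ks = CRYPTO_GET_KEY_SCHED(
            ARMV8_CRYPTO_CIPHER_OP_ENCRYPT, RTE_CRYPTO_CIPHER_AES_CBC, 128);
    /* ks now points to aes128_key_sched_enc. */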
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum armv8_crypto_chain_order
+armv8_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+
+       /*
+        * This driver currently covers only chained operations.
+        * Ignore cipher-only and authentication-only operations
+        * as well as chains longer than 2 xform structures.
+        */
+       if (xform->next == NULL || xform->next->next != NULL)
+               return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+       if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+               if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+                       return ARMV8_CRYPTO_CHAIN_AUTH_CIPHER;
+       }
+
+       if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+               if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+                       return ARMV8_CRYPTO_CHAIN_CIPHER_AUTH;
+       }
+
+       return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+static inline void
+auth_hmac_pad_prepare(struct armv8_crypto_session *sess,
+                               const struct rte_crypto_sym_xform *xform)
+{
+       size_t i;
+
+       /* Generate i_key_pad and o_key_pad */
+       memset(sess->auth.hmac.i_key_pad, 0, sizeof(sess->auth.hmac.i_key_pad));
+       rte_memcpy(sess->auth.hmac.i_key_pad, sess->auth.hmac.key,
+                                                       xform->auth.key.length);
+       memset(sess->auth.hmac.o_key_pad, 0, sizeof(sess->auth.hmac.o_key_pad));
+       rte_memcpy(sess->auth.hmac.o_key_pad, sess->auth.hmac.key,
+                                                       xform->auth.key.length);
+       /*
+        * XOR key with IPAD/OPAD values to obtain i_key_pad
+        * and o_key_pad.
+        * A byte-by-byte operation may seem less efficient here,
+        * but in fact it is the opposite.
+        * The resulting ASM code will likely operate on NEON
+        * registers (load the auth key to Qx, load IPAD/OPAD into
+        * multiple elements of Qy, then EOR 128 bits at once).
+        */
+       for (i = 0; i < SHA_BLOCK_MAX; i++) {
+               sess->auth.hmac.i_key_pad[i] ^= HMAC_IPAD_VALUE;
+               sess->auth.hmac.o_key_pad[i] ^= HMAC_OPAD_VALUE;
+       }
+}
+
+static inline int
+auth_set_prerequisites(struct armv8_crypto_session *sess,
+                       const struct rte_crypto_sym_xform *xform)
+{
+       uint8_t partial[64] = { 0 };
+       int error;
+
+       switch (xform->auth.algo) {
+       case RTE_CRYPTO_AUTH_SHA1_HMAC:
+               /*
+                * Generate authentication key, i_key_pad and o_key_pad.
+                */
+               /* Zero the session key memory */
+               memset(sess->auth.hmac.key, 0, SHA1_AUTH_KEY_LENGTH);
+
+               if (xform->auth.key.length > SHA1_AUTH_KEY_LENGTH) {
+                       /*
+                        * In case the key is longer than 160 bits
+                        * the algorithm will use SHA1(key) instead.
+                        */
+                       error = sha1_block(NULL, xform->auth.key.data,
+                               sess->auth.hmac.key, xform->auth.key.length);
+                       if (error != 0)
+                               return -1;
+               } else {
+                       /*
+                        * Copy the given authentication key into the session
+                        * key. Since the session key is zeroed, no additional
+                        * zero padding is needed if the key is shorter than
+                        * SHA1_AUTH_KEY_LENGTH.
+                        */
+                       rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+                                                       xform->auth.key.length);
+               }
+
+               /* Prepare HMAC padding: key|pattern */
+               auth_hmac_pad_prepare(sess, xform);
+               /*
+                * Calculate partial hash values for i_key_pad and o_key_pad.
+                * Will be used as initialization state for final HMAC.
+                */
+               error = sha1_block_partial(NULL, sess->auth.hmac.i_key_pad,
+                   partial, SHA1_BLOCK_SIZE);
+               if (error != 0)
+                       return -1;
+               memcpy(sess->auth.hmac.i_key_pad, partial, SHA1_BLOCK_SIZE);
+
+               error = sha1_block_partial(NULL, sess->auth.hmac.o_key_pad,
+                   partial, SHA1_BLOCK_SIZE);
+               if (error != 0)
+                       return -1;
+               memcpy(sess->auth.hmac.o_key_pad, partial, SHA1_BLOCK_SIZE);
+
+               break;
+       case RTE_CRYPTO_AUTH_SHA256_HMAC:
+               /*
+                * Generate authentication key, i_key_pad and o_key_pad.
+                */
+               /* Zero the session key memory */
+               memset(sess->auth.hmac.key, 0, SHA256_AUTH_KEY_LENGTH);
+
+               if (xform->auth.key.length > SHA256_AUTH_KEY_LENGTH) {
+                       /*
+                        * In case the key is longer than 256 bits
+                        * the algorithm will use SHA256(key) instead.
+                        */
+                       error = sha256_block(NULL, xform->auth.key.data,
+                               sess->auth.hmac.key, xform->auth.key.length);
+                       if (error != 0)
+                               return -1;
+               } else {
+                       /*
+                        * Copy the given authentication key into the session
+                        * key. Since the session key is zeroed, no additional
+                        * zero padding is needed if the key is shorter than
+                        * SHA256_AUTH_KEY_LENGTH.
+                        */
+                       rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+                                                       xform->auth.key.length);
+               }
+
+               /* Prepare HMAC padding: key|pattern */
+               auth_hmac_pad_prepare(sess, xform);
+               /*
+                * Calculate partial hash values for i_key_pad and o_key_pad.
+                * Will be used as initialization state for final HMAC.
+                */
+               error = sha256_block_partial(NULL, sess->auth.hmac.i_key_pad,
+                   partial, SHA256_BLOCK_SIZE);
+               if (error != 0)
+                       return -1;
+               memcpy(sess->auth.hmac.i_key_pad, partial, SHA256_BLOCK_SIZE);
+
+               error = sha256_block_partial(NULL, sess->auth.hmac.o_key_pad,
+                   partial, SHA256_BLOCK_SIZE);
+               if (error != 0)
+                       return -1;
+               memcpy(sess->auth.hmac.o_key_pad, partial, SHA256_BLOCK_SIZE);
+
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
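
The precomputation above follows the standard HMAC construction,

    HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))

where K' is the key zero-padded (or hashed first, when longer than the
block size). Storing the partial hashes of the two padded keys lets
every subsequent operation resume from that state instead of re-hashing
the key pads.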
+
+static inline int
+cipher_set_prerequisites(struct armv8_crypto_session *sess,
+                       const struct rte_crypto_sym_xform *xform)
+{
+       crypto_key_sched_t cipher_key_sched;
+
+       cipher_key_sched = sess->cipher.key_sched;
+       if (likely(cipher_key_sched != NULL)) {
+               /* Set up cipher session key */
+               cipher_key_sched(sess->cipher.key.data, xform->cipher.key.data);
+       }
+
+       return 0;
+}
+
+static int
+armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
+               const struct rte_crypto_sym_xform *cipher_xform,
+               const struct rte_crypto_sym_xform *auth_xform)
+{
+       enum armv8_crypto_chain_order order;
+       enum armv8_crypto_cipher_operation cop;
+       enum rte_crypto_cipher_algorithm calg;
+       enum rte_crypto_auth_algorithm aalg;
+
+       /* Validate and prepare the order of the combined operations */
+       switch (sess->chain_order) {
+       case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+       case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+               order = sess->chain_order;
+               break;
+       default:
+               return -EINVAL;
+       }
+       /* Select cipher direction */
+       sess->cipher.direction = cipher_xform->cipher.op;
+       /* Select cipher key */
+       sess->cipher.key.length = cipher_xform->cipher.key.length;
+       /* Set cipher direction */
+       cop = sess->cipher.direction;
+       /* Set cipher algorithm */
+       calg = cipher_xform->cipher.algo;
+
+       /* Select cipher algo */
+       switch (calg) {
+       /* Cover supported cipher algorithms */
+       case RTE_CRYPTO_CIPHER_AES_CBC:
+               sess->cipher.algo = calg;
+               /* IV len is always 16 bytes (block size) for AES CBC */
+               sess->cipher.iv_len = 16;
+               break;
+       default:
+               return -EINVAL;
+       }
+       /* Select auth generate/verify */
+       sess->auth.operation = auth_xform->auth.op;
+
+       /* Select auth algo */
+       switch (auth_xform->auth.algo) {
+       /* Cover supported hash algorithms */
+       case RTE_CRYPTO_AUTH_SHA1_HMAC:
+       case RTE_CRYPTO_AUTH_SHA256_HMAC: /* Fall through */
+               aalg = auth_xform->auth.algo;
+               sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Verify supported key lengths and extract proper algorithm */
+       switch (cipher_xform->cipher.key.length << 3) {
+       case 128:
+               sess->crypto_func =
+                               CRYPTO_GET_ALGO(order, cop, calg, aalg, 128);
+               sess->cipher.key_sched =
+                               CRYPTO_GET_KEY_SCHED(cop, calg, 128);
+               break;
+       case 192:
+       case 256:
+               /* These key lengths are not supported yet */
+       default: /* Fall through */
+               sess->crypto_func = NULL;
+               sess->cipher.key_sched = NULL;
+               return -EINVAL;
+       }
+
+       if (unlikely(sess->crypto_func == NULL)) {
+                * If we got here, there must be a bug in the algorithm
+                * selection above. Nevertheless, keep this check to catch
+                * the bug immediately and to avoid a NULL pointer
+                * dereference during op processing.
+                */
+               ARMV8_CRYPTO_LOG_ERR(
+                       "No appropriate crypto function for given parameters");
+               return -EINVAL;
+       }
+
+       /* Set up cipher session prerequisites */
+       if (cipher_set_prerequisites(sess, cipher_xform) != 0)
+               return -EINVAL;
+
+       /* Set up authentication session prerequisites */
+       if (auth_set_prerequisites(sess, auth_xform) != 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess,
+               const struct rte_crypto_sym_xform *xform)
+{
+       const struct rte_crypto_sym_xform *cipher_xform = NULL;
+       const struct rte_crypto_sym_xform *auth_xform = NULL;
+       bool is_chained_op;
+       int ret;
+
+       /* Filter out spurious/broken requests */
+       if (xform == NULL)
+               return -EINVAL;
+
+       sess->chain_order = armv8_crypto_get_chain_order(xform);
+       switch (sess->chain_order) {
+       case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+               cipher_xform = xform;
+               auth_xform = xform->next;
+               is_chained_op = true;
+               break;
+       case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+               auth_xform = xform;
+               cipher_xform = xform->next;
+               is_chained_op = true;
+               break;
+       default:
+               is_chained_op = false;
+               return -EINVAL;
+       }
+
+       if (is_chained_op) {
+               ret = armv8_crypto_set_session_chained_parameters(sess,
+                                               cipher_xform, auth_xform);
+               if (unlikely(ret != 0)) {
+                       ARMV8_CRYPTO_LOG_ERR(
+                       "Invalid/unsupported chained (cipher/auth) parameters");
+                       return -EINVAL;
+               }
+       } else {
+               ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/** Provide session for operation */
+static inline struct armv8_crypto_session *
+get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
+{
+       struct armv8_crypto_session *sess = NULL;
+
+       if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+               /* get existing session */
+               if (likely(op->sym->session != NULL &&
+                               op->sym->session->dev_type ==
+                               RTE_CRYPTODEV_ARMV8_PMD)) {
+                       sess = (struct armv8_crypto_session *)
+                               op->sym->session->_private;
+               }
+       } else {
+               /* provide internal session */
+               void *_sess = NULL;
+
+               if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
+                       sess = (struct armv8_crypto_session *)
+                               ((struct rte_cryptodev_sym_session *)_sess)
+                               ->_private;
+
+                       if (unlikely(armv8_crypto_set_session_parameters(
+                                       sess, op->sym->xform) != 0)) {
+                               rte_mempool_put(qp->sess_mp, _sess);
+                               sess = NULL;
+                       } else
+                               op->sym->session = _sess;
+               }
+       }
+
+       if (unlikely(sess == NULL))
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+       return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------*/
+
+/** Process chained (cipher and auth) operation */
+static inline void
+process_armv8_chained_op
+               (struct rte_crypto_op *op, struct armv8_crypto_session *sess,
+               struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+       crypto_func_t crypto_func;
+       crypto_arg_t arg;
+       struct rte_mbuf *m_asrc, *m_adst;
+       uint8_t *csrc, *cdst;
+       uint8_t *adst, *asrc;
+       uint64_t clen, alen;
+       int error;
+
+       clen = op->sym->cipher.data.length;
+       alen = op->sym->auth.data.length;
+
+       csrc = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+                       op->sym->cipher.data.offset);
+       cdst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+                       op->sym->cipher.data.offset);
+
+       switch (sess->chain_order) {
+       case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+               m_asrc = m_adst = mbuf_dst;
+               break;
+       case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+               m_asrc = mbuf_src;
+               m_adst = mbuf_dst;
+               break;
+       default:
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return;
+       }
+       asrc = rte_pktmbuf_mtod_offset(m_asrc, uint8_t *,
+                               op->sym->auth.data.offset);
+
+       switch (sess->auth.mode) {
+       case ARMV8_CRYPTO_AUTH_AS_AUTH:
+               /* Nothing to do here, just verify correct option */
+               break;
+       case ARMV8_CRYPTO_AUTH_AS_HMAC:
+               arg.digest.hmac.key = sess->auth.hmac.key;
+               arg.digest.hmac.i_key_pad = sess->auth.hmac.i_key_pad;
+               arg.digest.hmac.o_key_pad = sess->auth.hmac.o_key_pad;
+               break;
+       default:
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return;
+       }
+
+       if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE) {
+               adst = op->sym->auth.digest.data;
+               if (adst == NULL) {
+                       adst = rte_pktmbuf_mtod_offset(m_adst,
+                                       uint8_t *,
+                                       op->sym->auth.data.offset +
+                                       op->sym->auth.data.length);
+               }
+       } else {
+               adst = (uint8_t *)rte_pktmbuf_append(m_asrc,
+                               op->sym->auth.digest.length);
+       }
+
+       if (unlikely(op->sym->cipher.iv.length != sess->cipher.iv_len)) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return;
+       }
+
+       arg.cipher.iv = op->sym->cipher.iv.data;
+       arg.cipher.key = sess->cipher.key.data;
+       /* Acquire combined mode function */
+       crypto_func = sess->crypto_func;
+       ARMV8_CRYPTO_ASSERT(crypto_func != NULL);
+       error = crypto_func(csrc, cdst, clen, asrc, adst, alen, &arg);
+       if (error != 0) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return;
+       }
+
+       op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+       if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+               if (memcmp(adst, op->sym->auth.digest.data,
+                               op->sym->auth.digest.length) != 0) {
+                       op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+               }
+               /* Trim area used for digest from mbuf. */
+               rte_pktmbuf_trim(m_asrc,
+                               op->sym->auth.digest.length);
+       }
+}
+
+/** Process crypto operation for mbuf */
+static inline int
+process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+               struct armv8_crypto_session *sess)
+{
+       struct rte_mbuf *msrc, *mdst;
+
+       msrc = op->sym->m_src;
+       mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+       op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+       switch (sess->chain_order) {
+       case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+       case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */
+               process_armv8_chained_op(op, sess, msrc, mdst);
+               break;
+       default:
+               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+               break;
+       }
+
+       /* Free session if a session-less crypto op */
+       if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+               memset(sess, 0, sizeof(struct armv8_crypto_session));
+               rte_mempool_put(qp->sess_mp, op->sym->session);
+               op->sym->session = NULL;
+       }
+
+       if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+               op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+       if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ERROR))
+               return -1;
+
+       return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+               uint16_t nb_ops)
+{
+       struct armv8_crypto_session *sess;
+       struct armv8_crypto_qp *qp = queue_pair;
+       int i, retval;
+
+       for (i = 0; i < nb_ops; i++) {
+               sess = get_session(qp, ops[i]);
+               if (unlikely(sess == NULL))
+                       goto enqueue_err;
+
+               retval = process_op(qp, ops[i], sess);
+               if (unlikely(retval < 0))
+                       goto enqueue_err;
+       }
+
+       retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i);
+       qp->stats.enqueued_count += retval;
+
+       return retval;
+
+enqueue_err:
+       retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i);
+       if (ops[i] != NULL)
+               ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+       qp->stats.enqueue_err_count++;
+       return retval;
+}
+
+/** Dequeue burst */
+static uint16_t
+armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+               uint16_t nb_ops)
+{
+       struct armv8_crypto_qp *qp = queue_pair;
+
+       unsigned int nb_dequeued = 0;
+
+       nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+                       (void **)ops, nb_ops);
+       qp->stats.dequeued_count += nb_dequeued;
+
+       return nb_dequeued;
+}
+
+/** Create ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_create(struct rte_crypto_vdev_init_params *init_params)
+{
+       struct rte_cryptodev *dev;
+       struct armv8_crypto_private *internals;
+       int ret;
+
+       /* Check CPU for support for AES instruction set */
+       if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+               ARMV8_CRYPTO_LOG_ERR(
+                       "AES instructions not supported by CPU");
+               return -EFAULT;
+       }
+
+       /* Check CPU for support for SHA instruction set */
+       if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA1) ||
+           !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA2)) {
+               ARMV8_CRYPTO_LOG_ERR(
+                       "SHA1/SHA2 instructions not supported by CPU");
+               return -EFAULT;
+       }
+
+       /* Check CPU for support for Advanced SIMD instruction set */
+       if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+               ARMV8_CRYPTO_LOG_ERR(
+                       "Advanced SIMD instructions not supported by CPU");
+               return -EFAULT;
+       }
+
+       if (init_params->name[0] == '\0') {
+               ret = rte_cryptodev_pmd_create_dev_name(
+                               init_params->name,
+                               RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
+
+               if (ret < 0) {
+                       ARMV8_CRYPTO_LOG_ERR("failed to create unique name");
+                       return ret;
+               }
+       }
+
+       dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+                               sizeof(struct armv8_crypto_private),
+                               init_params->socket_id);
+       if (dev == NULL) {
+               ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+               goto init_error;
+       }
+
+       dev->dev_type = RTE_CRYPTODEV_ARMV8_PMD;
+       dev->dev_ops = rte_armv8_crypto_pmd_ops;
+
+       /* register enqueue/dequeue burst functions for the data path */
+       dev->dequeue_burst = armv8_crypto_pmd_dequeue_burst;
+       dev->enqueue_burst = armv8_crypto_pmd_enqueue_burst;
+
+       dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+                       RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+       /* Set vector instructions mode supported */
+       internals = dev->data->dev_private;
+
+       internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+       internals->max_nb_sessions = init_params->max_nb_sessions;
+
+       return 0;
+
+init_error:
+       ARMV8_CRYPTO_LOG_ERR(
+               "driver %s: cryptodev_armv8_crypto_create failed",
+               init_params->name);
+
+       cryptodev_armv8_crypto_uninit(init_params->name);
+       return -EFAULT;
+}
+
+/** Initialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_init(const char *name,
+               const char *input_args)
+{
+       struct rte_crypto_vdev_init_params init_params = {
+               RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+               RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+               rte_socket_id(),
+               {0}
+       };
+
+       rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+       RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+                       init_params.socket_id);
+       if (init_params.name[0] != '\0') {
+               RTE_LOG(INFO, PMD, "  User defined name = %s\n",
+                       init_params.name);
+       }
+       RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
+                       init_params.max_nb_queue_pairs);
+       RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
+                       init_params.max_nb_sessions);
+
+       return cryptodev_armv8_crypto_create(&init_params);
+}
+
+/** Uninitialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_uninit(const char *name)
+{
+       if (name == NULL)
+               return -EINVAL;
+
+       RTE_LOG(INFO, PMD,
+               "Closing ARMv8 crypto device %s on numa socket %u\n",
+               name, rte_socket_id());
+
+       return 0;
+}
+
+static struct rte_vdev_driver armv8_crypto_drv = {
+       .probe = cryptodev_armv8_crypto_init,
+       .remove = cryptodev_armv8_crypto_uninit
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
+       "max_nb_queue_pairs=<int> "
+       "max_nb_sessions=<int> "
+       "socket_id=<int>");
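
A minimal polling-loop sketch against the burst functions registered
above (dev_id, ops and nb_ops are assumed to be prepared by the
application; this PMD processes ops synchronously at enqueue time, so
they are available on the very next dequeue):

    uint16_t sent, recvd;

    sent = rte_cryptodev_enqueue_burst(dev_id, 0 /* qp id */, ops, nb_ops);
    recvd = rte_cryptodev_dequeue_burst(dev_id, 0 /* qp id */, ops, sent);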
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
new file mode 100644
index 0000000..2bf6475
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -0,0 +1,369 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2017.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities
+       armv8_crypto_pmd_capabilities[] = {
+       {       /* SHA1 HMAC */
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+                       {.sym = {
+                               .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+                               {.auth = {
+                                       .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+                                       .block_size = 64,
+                                       .key_size = {
+                                               .min = 16,
+                                               .max = 128,
+                                               .increment = 0
+                                       },
+                                       .digest_size = {
+                                               .min = 20,
+                                               .max = 20,
+                                               .increment = 0
+                                       },
+                                       .aad_size = { 0 }
+                               }, }
+                       }, }
+       },
+       {       /* SHA256 HMAC */
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+                       {.sym = {
+                               .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+                               {.auth = {
+                                       .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+                                       .block_size = 64,
+                                       .key_size = {
+                                               .min = 16,
+                                               .max = 128,
+                                               .increment = 0
+                                       },
+                                       .digest_size = {
+                                               .min = 32,
+                                               .max = 32,
+                                               .increment = 0
+                                       },
+                                       .aad_size = { 0 }
+                               }, }
+                       }, }
+       },
+       {       /* AES CBC */
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+                       {.sym = {
+                               .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+                               {.cipher = {
+                                       .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+                                       .block_size = 16,
+                                       .key_size = {
+                                               .min = 16,
+                                               .max = 16,
+                                               .increment = 0
+                                       },
+                                       .iv_size = {
+                                               .min = 16,
+                                               .max = 16,
+                                               .increment = 0
+                                       }
+                               }, }
+                       }, }
+       },
+
+       RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+armv8_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+       return 0;
+}
+
+/** Start device */
+static int
+armv8_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+       return 0;
+}
+
+/** Stop device */
+static void
+armv8_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+armv8_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+       return 0;
+}
+
+
+/** Get device statistics */
+static void
+armv8_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+               struct rte_cryptodev_stats *stats)
+{
+       int qp_id;
+
+       for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+               struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+               stats->enqueued_count += qp->stats.enqueued_count;
+               stats->dequeued_count += qp->stats.dequeued_count;
+
+               stats->enqueue_err_count += qp->stats.enqueue_err_count;
+               stats->dequeue_err_count += qp->stats.dequeue_err_count;
+       }
+}
+
+/** Reset device statistics */
+static void
+armv8_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+       int qp_id;
+
+       for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+               struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+               memset(&qp->stats, 0, sizeof(qp->stats));
+       }
+}
+
+
+/** Get device info */
+static void
+armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
+               struct rte_cryptodev_info *dev_info)
+{
+       struct armv8_crypto_private *internals = dev->data->dev_private;
+
+       if (dev_info != NULL) {
+               dev_info->dev_type = dev->dev_type;
+               dev_info->feature_flags = dev->feature_flags;
+               dev_info->capabilities = armv8_crypto_pmd_capabilities;
+               dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+               dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+       }
+}
+
+/** Release queue pair */
+static int
+armv8_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+
+       if (dev->data->queue_pairs[qp_id] != NULL) {
+               rte_free(dev->data->queue_pairs[qp_id]);
+               dev->data->queue_pairs[qp_id] = NULL;
+       }
+
+       return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+               struct armv8_crypto_qp *qp)
+{
+       unsigned int n;
+
+       n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
+                       dev->data->dev_id, qp->id);
+
+       /* snprintf() signals truncation by returning size or more */
+       if (n >= sizeof(qp->name))
+               return -1;
+
+       return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
+               unsigned int ring_size, int socket_id)
+{
+       struct rte_ring *r;
+
+       r = rte_ring_lookup(qp->name);
+       if (r) {
+               if (r->prod.size >= ring_size) {
+                       ARMV8_CRYPTO_LOG_INFO(
+                               "Reusing existing ring %s for processed ops",
+                                qp->name);
+                       return r;
+               }
+
+               ARMV8_CRYPTO_LOG_ERR(
+                       "Unable to reuse existing ring %s for processed ops",
+                        qp->name);
+               return NULL;
+       }
+
+       return rte_ring_create(qp->name, ring_size, socket_id,
+                       RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+               const struct rte_cryptodev_qp_conf *qp_conf,
+                int socket_id)
+{
+       struct armv8_crypto_qp *qp = NULL;
+
+       /* Free memory prior to re-allocation if needed. */
+       if (dev->data->queue_pairs[qp_id] != NULL)
+               armv8_crypto_pmd_qp_release(dev, qp_id);
+
+       /* Allocate the queue pair data structure. */
+       qp = rte_zmalloc_socket("ARMv8 PMD Queue Pair", sizeof(*qp),
+                                       RTE_CACHE_LINE_SIZE, socket_id);
+       if (qp == NULL)
+               return -ENOMEM;
+
+       qp->id = qp_id;
+       dev->data->queue_pairs[qp_id] = qp;
+
+       if (armv8_crypto_pmd_qp_set_unique_name(dev, qp) != 0)
+               goto qp_setup_cleanup;
+
+       qp->processed_ops = armv8_crypto_pmd_qp_create_processed_ops_ring(qp,
+                       qp_conf->nb_descriptors, socket_id);
+       if (qp->processed_ops == NULL)
+               goto qp_setup_cleanup;
+
+       qp->sess_mp = dev->data->session_pool;
+
+       memset(&qp->stats, 0, sizeof(qp->stats));
+
+       return 0;
+
+qp_setup_cleanup:
+       if (qp)
+               rte_free(qp);
+
+       return -1;
+}
+
+/** Start queue pair */
+static int
+armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+               __rte_unused uint16_t queue_pair_id)
+{
+       return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+               __rte_unused uint16_t queue_pair_id)
+{
+       return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+       return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+       return sizeof(struct armv8_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static void *
+armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+               struct rte_crypto_sym_xform *xform, void *sess)
+{
+       if (unlikely(sess == NULL)) {
+               ARMV8_CRYPTO_LOG_ERR("invalid session struct");
+               return NULL;
+       }
+
+       if (armv8_crypto_set_session_parameters(
+                       sess, xform) != 0) {
+               ARMV8_CRYPTO_LOG_ERR("failed to configure session parameters");
+               return NULL;
+       }
+
+       return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
+                               void *sess)
+{
+
+       /* Zero out the whole structure */
+       if (sess)
+               memset(sess, 0, sizeof(struct armv8_crypto_session));
+}
+
+struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
+               .dev_configure          = armv8_crypto_pmd_config,
+               .dev_start              = armv8_crypto_pmd_start,
+               .dev_stop               = armv8_crypto_pmd_stop,
+               .dev_close              = armv8_crypto_pmd_close,
+
+               .stats_get              = armv8_crypto_pmd_stats_get,
+               .stats_reset            = armv8_crypto_pmd_stats_reset,
+
+               .dev_infos_get          = armv8_crypto_pmd_info_get,
+
+               .queue_pair_setup       = armv8_crypto_pmd_qp_setup,
+               .queue_pair_release     = armv8_crypto_pmd_qp_release,
+               .queue_pair_start       = armv8_crypto_pmd_qp_start,
+               .queue_pair_stop        = armv8_crypto_pmd_qp_stop,
+               .queue_pair_count       = armv8_crypto_pmd_qp_count,
+
+               .session_get_size       = armv8_crypto_pmd_session_get_size,
+               .session_configure      = armv8_crypto_pmd_session_configure,
+               .session_clear          = armv8_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;
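
A sketch of how an application reaches session_configure above through
the generic session API of this release (cipher_xform chains a cipher
and an auth xform as in the example near the top of this patch):

    struct rte_cryptodev_sym_session *sess;

    /* Invokes armv8_crypto_pmd_session_configure() via dev_ops; returns
     * NULL if the xform chain is not a supported cipher+auth pair. */
    sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform);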
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
new file mode 100644
index 0000000..b75107f
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -0,0 +1,211 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2017.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
+#define _RTE_ARMV8_PMD_PRIVATE_H_
+
+#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
+       RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+                       RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+                       __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...) \
+       RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+                       RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+                       __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...) \
+       RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+                       RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+                       __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_ASSERT(con)                               \
+do {                                                           \
+       if (!(con)) {                                           \
+               rte_panic("%s(): " RTE_STR(con)                 \
+                   " condition failed, line %u", __func__,     \
+                   __LINE__);                                  \
+       }                                                       \
+} while (0)
+
+#else
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...)
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...)
+#define ARMV8_CRYPTO_ASSERT(con)
+#endif
+
+#define NBBY           8               /* Number of bits in a byte */
+#define BYTE_LENGTH(x) ((x) / NBBY)    /* Number of bytes in x (round down) */
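+/*
+ * Example: BYTE_LENGTH(256) == 32 and BYTE_LENGTH(512) == 64; these
+ * yield the SHA key and block sizes in bytes defined further below.
+ */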
+
+/** ARMv8 operation order mode enumerator */
+enum armv8_crypto_chain_order {
+       ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+       ARMV8_CRYPTO_CHAIN_AUTH_CIPHER,
+       ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED,
+       ARMV8_CRYPTO_CHAIN_LIST_END = ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED
+};
+
+/** ARMv8 cipher operation enumerator */
+enum armv8_crypto_cipher_operation {
+       ARMV8_CRYPTO_CIPHER_OP_ENCRYPT = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+       ARMV8_CRYPTO_CIPHER_OP_DECRYPT = RTE_CRYPTO_CIPHER_OP_DECRYPT,
+       ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED,
+       ARMV8_CRYPTO_CIPHER_OP_LIST_END = ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED
+};
+
+enum armv8_crypto_cipher_keylen {
+       ARMV8_CRYPTO_CIPHER_KEYLEN_128,
+       ARMV8_CRYPTO_CIPHER_KEYLEN_192,
+       ARMV8_CRYPTO_CIPHER_KEYLEN_256,
+       ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED,
+       ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END =
+               ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED
+};
+
+/** ARMv8 auth mode enumerator */
+enum armv8_crypto_auth_mode {
+       ARMV8_CRYPTO_AUTH_AS_AUTH,
+       ARMV8_CRYPTO_AUTH_AS_HMAC,
+       ARMV8_CRYPTO_AUTH_AS_CIPHER,
+       ARMV8_CRYPTO_AUTH_NOT_SUPPORTED,
+       ARMV8_CRYPTO_AUTH_LIST_END = ARMV8_CRYPTO_AUTH_NOT_SUPPORTED
+};
+
+#define CRYPTO_ORDER_MAX               ARMV8_CRYPTO_CHAIN_LIST_END
+#define CRYPTO_CIPHER_OP_MAX           ARMV8_CRYPTO_CIPHER_OP_LIST_END
+#define CRYPTO_CIPHER_KEYLEN_MAX       ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END
+#define CRYPTO_CIPHER_MAX              RTE_CRYPTO_CIPHER_LIST_END
+#define CRYPTO_AUTH_MAX                        RTE_CRYPTO_AUTH_LIST_END
+
+#define HMAC_IPAD_VALUE                        (0x36)
+#define HMAC_OPAD_VALUE                        (0x5C)
+
+#define SHA256_AUTH_KEY_LENGTH         (BYTE_LENGTH(256))
+#define SHA256_BLOCK_SIZE              (BYTE_LENGTH(512))
+
+#define SHA1_AUTH_KEY_LENGTH           (BYTE_LENGTH(160))
+#define SHA1_BLOCK_SIZE                        (BYTE_LENGTH(512))
+
+#define SHA_AUTH_KEY_MAX               SHA256_AUTH_KEY_LENGTH
+#define SHA_BLOCK_MAX                  SHA256_BLOCK_SIZE
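+
+/*
+ * Illustrative sketch (not part of this header's API): the standard
+ * HMAC precomputation from RFC 2104 that the values above parameterize.
+ * The key, zero-padded to the hash block size, is XOR-ed with each pad
+ * byte, roughly:
+ *
+ *     for (i = 0; i < SHA_BLOCK_MAX; i++) {
+ *             i_key_pad[i] = padded_key[i] ^ HMAC_IPAD_VALUE;
+ *             o_key_pad[i] = padded_key[i] ^ HMAC_OPAD_VALUE;
+ *     }
+ *
+ * Here padded_key is a hypothetical buffer holding the session key
+ * zero-extended to SHA_BLOCK_MAX bytes; the results are cached per
+ * session in the hmac struct below.
+ */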
+
+typedef int (*crypto_func_t)(uint8_t *, uint8_t *, uint64_t,
+                               uint8_t *, uint8_t *, uint64_t,
+                               crypto_arg_t *);
+
+typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
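+
+/*
+ * Both prototypes are assumed to match routines provided by the
+ * external armv8_crypto library (which also supplies crypto_arg_t);
+ * the exact parameter semantics are defined by that library.
+ */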
+
+/** private data structure for each ARMv8 crypto device */
+struct armv8_crypto_private {
+       unsigned int max_nb_qpairs;
+       /**< Max number of queue pairs */
+       unsigned int max_nb_sessions;
+       /**< Max number of sessions */
+};
+
+/** ARMv8 crypto queue pair */
+struct armv8_crypto_qp {
+       uint16_t id;
+       /**< Queue Pair Identifier */
+       struct rte_ring *processed_ops;
+       /**< Ring for placing processed operations */
+       struct rte_mempool *sess_mp;
+       /**< Session Mempool */
+       struct rte_cryptodev_stats stats;
+       /**< Queue pair statistics */
+       char name[RTE_CRYPTODEV_NAME_LEN];
+       /**< Unique Queue Pair Name */
+} __rte_cache_aligned;
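+
+/*
+ * Expected dataflow (mirroring other DPDK software crypto PMDs, not
+ * restated from the .c file here): the enqueue burst function processes
+ * operations synchronously and pushes them onto processed_ops; the
+ * dequeue burst function then pops them back off the ring for the
+ * application.
+ */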
+
+/** ARMv8 crypto private session structure */
+struct armv8_crypto_session {
+       enum armv8_crypto_chain_order chain_order;
+       /**< chain order mode */
+       crypto_func_t crypto_func;
+       /**< cryptographic function to use for this session */
+
+       /** Cipher Parameters */
+       struct {
+               enum rte_crypto_cipher_operation direction;
+               /**< cipher operation direction */
+               enum rte_crypto_cipher_algorithm algo;
+               /**< cipher algorithm */
+               int iv_len;
+               /**< IV length */
+
+               struct {
+                       uint8_t data[256];
+                       /**< key data */
+                       size_t length;
+                       /**< key length in bytes */
+               } key;
+
+               crypto_key_sched_t key_sched;
+               /**< Key schedule function */
+       } cipher;
+
+       /** Authentication Parameters */
+       struct {
+               enum rte_crypto_auth_operation operation;
+               /**< auth operation generate or verify */
+               enum armv8_crypto_auth_mode mode;
+               /**< auth operation mode */
+
+               union {
+                       struct {
+                               /* Add data if needed */
+                       } auth;
+
+                       struct {
+                               uint8_t i_key_pad[SHA_BLOCK_MAX]
+                                                       __rte_cache_aligned;
+                               /**< inner pad (max supported block length) */
+                               uint8_t o_key_pad[SHA_BLOCK_MAX]
+                                                       __rte_cache_aligned;
+                               /**< outer pad (max supported block length) */
+                               uint8_t key[SHA_AUTH_KEY_MAX];
+                               /**< HMAC key (max supported length) */
+                       } hmac;
+               };
+       } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate ARMv8 crypto session parameters */
+extern int armv8_crypto_set_session_parameters(
+               struct armv8_crypto_session *sess,
+               const struct rte_crypto_sym_xform *xform);
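+
+/*
+ * Illustrative only: an xform chain this function accepts for, e.g.,
+ * AES-128-CBC encryption chained with SHA1-HMAC generation could be
+ * built as below (hmac_key/aes_key are hypothetical application
+ * buffers; field names follow the 17.02 cryptodev API):
+ *
+ *     struct rte_crypto_sym_xform auth_xform = {
+ *             .type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *             .next = NULL,
+ *             .auth = {
+ *                     .op = RTE_CRYPTO_AUTH_OP_GENERATE,
+ *                     .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ *                     .key = { .data = hmac_key, .length = 20 },
+ *                     .digest_length = 20,
+ *             },
+ *     };
+ *     struct rte_crypto_sym_xform cipher_xform = {
+ *             .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *             .next = &auth_xform,
+ *             .cipher = {
+ *                     .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ *                     .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ *                     .key = { .data = aes_key, .length = 16 },
+ *             },
+ *     };
+ */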
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops;
+
+#endif /* _RTE_ARMV8_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_version.map b/drivers/crypto/armv8/rte_armv8_pmd_version.map
new file mode 100644 (file)
index 0000000..1f84b68
--- /dev/null
@@ -0,0 +1,3 @@
+DPDK_17.02 {
+       local: *;
+};
index f4e66e6..452b174 100644 (file)
@@ -66,6 +66,8 @@ extern "C" {
 /**< KASUMI PMD device name */
 #define CRYPTODEV_NAME_ZUC_PMD         crypto_zuc
 /**< ZUC PMD device name */
+#define CRYPTODEV_NAME_ARMV8_PMD       crypto_armv8
+/**< ARMv8 Crypto PMD device name */
 
 /** Crypto device type */
 enum rte_cryptodev_type {
@@ -77,6 +79,7 @@ enum rte_cryptodev_type {
        RTE_CRYPTODEV_KASUMI_PMD,       /**< KASUMI PMD */
        RTE_CRYPTODEV_ZUC_PMD,          /**< ZUC PMD */
        RTE_CRYPTODEV_OPENSSL_PMD,    /**<  OpenSSL PMD */
+       RTE_CRYPTODEV_ARMV8_PMD,        /**< ARMv8 crypto PMD */
 };
 
 extern const char **rte_cyptodev_names;
index 1cd3178..a5daa84 100644 (file)
@@ -147,6 +147,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI)      += -lrte_pmd_kasumi
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI)      += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC)         += -lrte_pmd_zuc
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC)         += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO)    += -lrte_pmd_armv8
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO)    += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
 endif # CONFIG_RTE_LIBRTE_CRYPTODEV
 
 endif # !CONFIG_RTE_BUILD_SHARED_LIBS