diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 3d603a5ac3..c642ac350f 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium networks Ltd. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include @@ -36,13 +8,15 @@ #include #include #include -#include +#include #include #include -#include "armv8_crypto_defs.h" +#include "AArch64cryptolib.h" + +#include "armv8_pmd_private.h" -#include "rte_armv8_pmd_private.h" +static uint8_t cryptodev_driver_id; static int cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev); @@ -104,25 +78,29 @@ crypto_func_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_AUTH_MAX][CRYPTO_CIPHER_KEYLEN_MAX]; static const crypto_func_tbl_t crypto_op_ca_encrypt = { /* [cipher alg][auth alg][key length] = crypto_function, */ - [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = aes128cbc_sha1_hmac, - [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = aes128cbc_sha256_hmac, + [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = + armv8_enc_aes_cbc_sha1_128, + [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = + armv8_enc_aes_cbc_sha256_128, }; static const crypto_func_tbl_t crypto_op_ca_decrypt = { - NULL + { {NULL} } }; static const crypto_func_tbl_t crypto_op_ac_encrypt = { - NULL + { {NULL} } }; static const crypto_func_tbl_t crypto_op_ac_decrypt = { /* [cipher alg][auth alg][key length] = crypto_function, */ - [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = sha1_hmac_aes128cbc_dec, - [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = sha256_hmac_aes128cbc_dec, + [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = + armv8_dec_aes_cbc_sha1_128, + [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = + armv8_dec_aes_cbc_sha256_128, }; /** @@ -165,7 +143,8 @@ crypto_chain_order[] = { crypto_func_tbl_t *func_tbl = \ (crypto_chain_order[(order)])[(cop)]; \ \ - ((*func_tbl)[(calg)][(aalg)][KEYL(keyl)]); \ + ((calg >= CRYPTO_CIPHER_MAX) || (aalg >= CRYPTO_AUTH_MAX)) ? \ + NULL : ((*func_tbl)[(calg)][(aalg)][KEYL(keyl)]); \ }) /*----------------------------------------------------------------------------*/ @@ -181,13 +160,13 @@ crypto_key_sched_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_CIPHER_KEYLEN_MAX]; static const crypto_key_sched_tbl_t crypto_key_sched_encrypt = { /* [cipher alg][key length] = key_expand_func, */ - [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_enc, + [CIPH_AES_CBC][KEYL(128)] = armv8_expandkeys_enc_aes_cbc_128, }; static const crypto_key_sched_tbl_t crypto_key_sched_decrypt = { /* [cipher alg][key length] = key_expand_func, */ - [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_dec, + [CIPH_AES_CBC][KEYL(128)] = armv8_expandkeys_dec_aes_cbc_128, }; /** @@ -210,7 +189,8 @@ crypto_key_sched_dir[] = { ({ \ crypto_key_sched_tbl_t *ks_tbl = crypto_key_sched_dir[(cop)]; \ \ - ((*ks_tbl)[(calg)][KEYL(keyl)]); \ + (calg >= CRYPTO_CIPHER_MAX) ? \ + NULL : ((*ks_tbl)[(calg)][KEYL(keyl)]); \ }) /*----------------------------------------------------------------------------*/ @@ -288,27 +268,14 @@ auth_set_prerequisites(struct armv8_crypto_session *sess, * Generate authentication key, i_key_pad and o_key_pad. */ /* Zero memory under key */ - memset(sess->auth.hmac.key, 0, SHA1_AUTH_KEY_LENGTH); - - if (xform->auth.key.length > SHA1_AUTH_KEY_LENGTH) { - /* - * In case the key is longer than 160 bits - * the algorithm will use SHA1(key) instead. - */ - error = sha1_block(NULL, xform->auth.key.data, - sess->auth.hmac.key, xform->auth.key.length); - if (error != 0) - return -1; - } else { - /* - * Now copy the given authentication key to the session - * key assuming that the session key is zeroed there is - * no need for additional zero padding if the key is - * shorter than SHA1_AUTH_KEY_LENGTH. 
- */ - rte_memcpy(sess->auth.hmac.key, xform->auth.key.data, - xform->auth.key.length); - } + memset(sess->auth.hmac.key, 0, SHA1_BLOCK_SIZE); + + /* + * Now copy the given authentication key to the session + * key. + */ + rte_memcpy(sess->auth.hmac.key, xform->auth.key.data, + xform->auth.key.length); /* Prepare HMAC padding: key|pattern */ auth_hmac_pad_prepare(sess, xform); @@ -316,14 +283,16 @@ auth_set_prerequisites(struct armv8_crypto_session *sess, * Calculate partial hash values for i_key_pad and o_key_pad. * Will be used as initialization state for final HMAC. */ - error = sha1_block_partial(NULL, sess->auth.hmac.i_key_pad, - partial, SHA1_BLOCK_SIZE); + error = armv8_sha1_block_partial(NULL, + sess->auth.hmac.i_key_pad, + partial, SHA1_BLOCK_SIZE); if (error != 0) return -1; memcpy(sess->auth.hmac.i_key_pad, partial, SHA1_BLOCK_SIZE); - error = sha1_block_partial(NULL, sess->auth.hmac.o_key_pad, - partial, SHA1_BLOCK_SIZE); + error = armv8_sha1_block_partial(NULL, + sess->auth.hmac.o_key_pad, + partial, SHA1_BLOCK_SIZE); if (error != 0) return -1; memcpy(sess->auth.hmac.o_key_pad, partial, SHA1_BLOCK_SIZE); @@ -334,27 +303,14 @@ auth_set_prerequisites(struct armv8_crypto_session *sess, * Generate authentication key, i_key_pad and o_key_pad. */ /* Zero memory under key */ - memset(sess->auth.hmac.key, 0, SHA256_AUTH_KEY_LENGTH); - - if (xform->auth.key.length > SHA256_AUTH_KEY_LENGTH) { - /* - * In case the key is longer than 256 bits - * the algorithm will use SHA256(key) instead. - */ - error = sha256_block(NULL, xform->auth.key.data, - sess->auth.hmac.key, xform->auth.key.length); - if (error != 0) - return -1; - } else { - /* - * Now copy the given authentication key to the session - * key assuming that the session key is zeroed there is - * no need for additional zero padding if the key is - * shorter than SHA256_AUTH_KEY_LENGTH. - */ - rte_memcpy(sess->auth.hmac.key, xform->auth.key.data, - xform->auth.key.length); - } + memset(sess->auth.hmac.key, 0, SHA256_BLOCK_SIZE); + + /* + * Now copy the given authentication key to the session + * key. + */ + rte_memcpy(sess->auth.hmac.key, xform->auth.key.data, + xform->auth.key.length); /* Prepare HMAC padding: key|pattern */ auth_hmac_pad_prepare(sess, xform); @@ -362,14 +318,16 @@ auth_set_prerequisites(struct armv8_crypto_session *sess, * Calculate partial hash values for i_key_pad and o_key_pad. * Will be used as initialization state for final HMAC. 
*/ - error = sha256_block_partial(NULL, sess->auth.hmac.i_key_pad, - partial, SHA256_BLOCK_SIZE); + error = armv8_sha256_block_partial(NULL, + sess->auth.hmac.i_key_pad, + partial, SHA256_BLOCK_SIZE); if (error != 0) return -1; memcpy(sess->auth.hmac.i_key_pad, partial, SHA256_BLOCK_SIZE); - error = sha256_block_partial(NULL, sess->auth.hmac.o_key_pad, - partial, SHA256_BLOCK_SIZE); + error = armv8_sha256_block_partial(NULL, + sess->auth.hmac.o_key_pad, + partial, SHA256_BLOCK_SIZE); if (error != 0) return -1; memcpy(sess->auth.hmac.o_key_pad, partial, SHA256_BLOCK_SIZE); @@ -414,14 +372,23 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess, order = sess->chain_order; break; default: - return -EINVAL; + return -ENOTSUP; } /* Select cipher direction */ sess->cipher.direction = cipher_xform->cipher.op; /* Select cipher key */ sess->cipher.key.length = cipher_xform->cipher.key.length; /* Set cipher direction */ - cop = sess->cipher.direction; + switch (sess->cipher.direction) { + case RTE_CRYPTO_CIPHER_OP_ENCRYPT: + cop = ARMV8_CRYPTO_CIPHER_OP_ENCRYPT; + break; + case RTE_CRYPTO_CIPHER_OP_DECRYPT: + cop = ARMV8_CRYPTO_CIPHER_OP_DECRYPT; + break; + default: + return -ENOTSUP; + } /* Set cipher algorithm */ calg = cipher_xform->cipher.algo; @@ -431,10 +398,10 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess, case RTE_CRYPTO_CIPHER_AES_CBC: sess->cipher.algo = calg; /* IV len is always 16 bytes (block size) for AES CBC */ - sess->cipher.iv_len = 16; + sess->cipher.iv.length = 16; break; default: - return -EINVAL; + return -ENOTSUP; } /* Select auth generate/verify */ sess->auth.operation = auth_xform->auth.op; @@ -448,9 +415,12 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess, sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC; break; default: - return -EINVAL; + return -ENOTSUP; } + /* Set the digest length */ + sess->auth.digest_length = auth_xform->auth.digest_length; + /* Verify supported key lengths and extract proper algorithm */ switch (cipher_xform->cipher.key.length << 3) { case 128: @@ -465,10 +435,11 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess, default: /* Fall through */ sess->crypto_func = NULL; sess->cipher.key_sched = NULL; - return -EINVAL; + return -ENOTSUP; } - if (unlikely(sess->crypto_func == NULL)) { + if (unlikely(sess->crypto_func == NULL || + sess->cipher.key_sched == NULL)) { /* * If we got here that means that there must be a bug * in the algorithms selection above. 
Nevertheless keep @@ -519,20 +490,23 @@ armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess, break; default: is_chained_op = false; - return -EINVAL; + return -ENOTSUP; } + /* Set IV offset */ + sess->cipher.iv.offset = cipher_xform->cipher.iv.offset; + if (is_chained_op) { ret = armv8_crypto_set_session_chained_parameters(sess, cipher_xform, auth_xform); if (unlikely(ret != 0)) { ARMV8_CRYPTO_LOG_ERR( "Invalid/unsupported chained (cipher/auth) parameters"); - return -EINVAL; + return ret; } } else { ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation"); - return -EINVAL; + return -ENOTSUP; } return 0; @@ -544,30 +518,37 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op) { struct armv8_crypto_session *sess = NULL; - if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) { + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { /* get existing session */ - if (likely(op->sym->session != NULL && - op->sym->session->dev_type == - RTE_CRYPTODEV_ARMV8_PMD)) { + if (likely(op->sym->session != NULL)) { sess = (struct armv8_crypto_session *) - op->sym->session->_private; + get_sym_session_private_data( + op->sym->session, + cryptodev_driver_id); } } else { /* provide internal session */ void *_sess = NULL; + void *_sess_private_data = NULL; - if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) { - sess = (struct armv8_crypto_session *) - ((struct rte_cryptodev_sym_session *)_sess) - ->_private; - - if (unlikely(armv8_crypto_set_session_parameters( - sess, op->sym->xform) != 0)) { - rte_mempool_put(qp->sess_mp, _sess); - sess = NULL; - } else - op->sym->session = _sess; + if (rte_mempool_get(qp->sess_mp, (void **)&_sess)) + return NULL; + + if (rte_mempool_get(qp->sess_mp_priv, + (void **)&_sess_private_data)) + return NULL; + + sess = (struct armv8_crypto_session *)_sess_private_data; + + if (unlikely(armv8_crypto_set_session_parameters(sess, + op->sym->xform) != 0)) { + rte_mempool_put(qp->sess_mp, _sess); + rte_mempool_put(qp->sess_mp_priv, _sess_private_data); + sess = NULL; } + op->sym->session = (struct rte_cryptodev_sym_session *)_sess; + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -586,12 +567,12 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op) /** Process cipher operation */ static inline void -process_armv8_chained_op - (struct rte_crypto_op *op, struct armv8_crypto_session *sess, +process_armv8_chained_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op, + struct armv8_crypto_session *sess, struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst) { crypto_func_t crypto_func; - crypto_arg_t arg; + armv8_cipher_digest_t arg; struct rte_mbuf *m_asrc, *m_adst; uint8_t *csrc, *cdst; uint8_t *adst, *asrc; @@ -644,20 +625,15 @@ process_armv8_chained_op op->sym->auth.data.length); } } else { - adst = (uint8_t *)rte_pktmbuf_append(m_asrc, - op->sym->auth.digest.length); + adst = qp->temp_digest; } - if (unlikely(op->sym->cipher.iv.length != sess->cipher.iv_len)) { - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return; - } - - arg.cipher.iv = op->sym->cipher.iv.data; + arg.cipher.iv = rte_crypto_op_ctod_offset(op, uint8_t *, + sess->cipher.iv.offset); arg.cipher.key = sess->cipher.key.data; /* Acquire combined mode function */ crypto_func = sess->crypto_func; - ARMV8_CRYPTO_ASSERT(crypto_func != NULL); + RTE_VERIFY(crypto_func != NULL); error = crypto_func(csrc, cdst, clen, asrc, adst, alen, &arg); if (error != 0) { op->status = 
RTE_CRYPTO_OP_STATUS_INVALID_ARGS; @@ -667,18 +643,15 @@ process_armv8_chained_op op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { if (memcmp(adst, op->sym->auth.digest.data, - op->sym->auth.digest.length) != 0) { + sess->auth.digest_length) != 0) { op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; } - /* Trim area used for digest from mbuf. */ - rte_pktmbuf_trim(m_asrc, - op->sym->auth.digest.length); } } /** Process crypto operation for mbuf */ static inline int -process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op, +process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op, struct armv8_crypto_session *sess) { struct rte_mbuf *msrc, *mdst; @@ -691,7 +664,7 @@ process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op, switch (sess->chain_order) { case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH: case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */ - process_armv8_chained_op(op, sess, msrc, mdst); + process_armv8_chained_op(qp, op, sess, msrc, mdst); break; default: op->status = RTE_CRYPTO_OP_STATUS_ERROR; @@ -699,8 +672,12 @@ process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op, } /* Free session if a session-less crypto op */ - if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) { + if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(sess, 0, sizeof(struct armv8_crypto_session)); + memset(op->sym->session, 0, + rte_cryptodev_sym_get_existing_header_session_size( + op->sym->session)); + rte_mempool_put(qp->sess_mp_priv, sess); rte_mempool_put(qp->sess_mp, op->sym->session); op->sym->session = NULL; } @@ -775,7 +752,7 @@ armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, static int cryptodev_armv8_crypto_create(const char *name, struct rte_vdev_device *vdev, - struct rte_crypto_vdev_init_params *init_params) + struct rte_cryptodev_pmd_init_params *init_params) { struct rte_cryptodev *dev; struct armv8_crypto_private *internals; @@ -802,19 +779,13 @@ cryptodev_armv8_crypto_create(const char *name, return -EFAULT; } - if (init_params->name[0] == '\0') - snprintf(init_params->name, sizeof(init_params->name), - "%s", name); - - dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name, - sizeof(struct armv8_crypto_private), - init_params->socket_id); + dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev"); goto init_error; } - dev->dev_type = RTE_CRYPTODEV_ARMV8_PMD; + dev->driver_id = cryptodev_driver_id; dev->dev_ops = rte_armv8_crypto_pmd_ops; /* register rx/tx burst functions for data path */ @@ -824,13 +795,12 @@ cryptodev_armv8_crypto_create(const char *name, dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_CPU_NEON | - RTE_CRYPTODEV_FF_CPU_ARM_CE; + RTE_CRYPTODEV_FF_CPU_ARM_CE | + RTE_CRYPTODEV_FF_SYM_SESSIONLESS; - /* Set vector instructions mode supported */ internals = dev->data->dev_private; internals->max_nb_qpairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; @@ -847,11 +817,11 @@ init_error: static int cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev) { - struct rte_crypto_vdev_init_params init_params = { - RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS, + struct rte_cryptodev_pmd_init_params init_params = { + "", + sizeof(struct armv8_crypto_private), rte_socket_id(), - {0} + 
RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
 };
 const char *name;
 const char *input_args;
@@ -860,18 +830,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
 if (name == NULL)
 return -EINVAL;
 input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0') {
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- }
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);

 return cryptodev_armv8_crypto_create(name, vdev, &init_params);
 }
@@ -880,6 +839,7 @@
 static int
 cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
 {
+ struct rte_cryptodev *cryptodev;
 const char *name;

 name = rte_vdev_device_name(vdev);
@@ -890,17 +850,26 @@ cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
 "Closing ARMv8 crypto device %s on numa socket %u\n",
 name, rte_socket_id());

- return 0;
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
 }

-static struct rte_vdev_driver armv8_crypto_drv = {
+static struct rte_vdev_driver armv8_crypto_pmd_drv = {
 .probe = cryptodev_armv8_crypto_init,
 .remove = cryptodev_armv8_crypto_uninit
 };

-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_drv);
+static struct cryptodev_driver armv8_crypto_drv;
+
+RTE_LOG_REGISTER_DEFAULT(crypto_armv8_log_type, ERR);
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv);
 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
 "max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
 "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver,
+ cryptodev_driver_id);
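
The auth_set_prerequisites() hunks above precompute i_key_pad and o_key_pad following the standard RFC 2104 HMAC construction: the key buffer is zeroed, the (at most block-sized) key is copied in, the pads are derived by XOR with the 0x36/0x5c patterns, and one block of each pad is then hashed (armv8_sha1_block_partial()/armv8_sha256_block_partial()) to seed the per-packet HMAC state. The sketch below illustrates only the padding step; hmac_prepare_pads() is a hypothetical helper written for this illustration, not part of the PMD, and it assumes the 64-byte block size shared by SHA-1 and SHA-256. The block-partial hashing done by the AArch64cryptolib calls is intentionally left out.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HMAC_BLOCK_SIZE 64	/* block size of both SHA-1 and SHA-256 */
#define HMAC_IPAD	0x36
#define HMAC_OPAD	0x5c

/*
 * Build the HMAC inner/outer key pads from a key of at most one block.
 * Zeroing the block first gives the implicit zero-padding for short keys,
 * which is the same reason the patch clears sess->auth.hmac.key before
 * copying the user-supplied key into it.
 */
static void
hmac_prepare_pads(const uint8_t *key, size_t key_len,
		uint8_t i_key_pad[HMAC_BLOCK_SIZE],
		uint8_t o_key_pad[HMAC_BLOCK_SIZE])
{
	uint8_t block[HMAC_BLOCK_SIZE];
	size_t i;

	memset(block, 0, sizeof(block));
	memcpy(block, key, key_len);	/* caller guarantees key_len <= 64 */

	for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
		i_key_pad[i] = block[i] ^ HMAC_IPAD;
		o_key_pad[i] = block[i] ^ HMAC_OPAD;
	}
}

int
main(void)
{
	static const uint8_t key[] = "example-hmac-key";
	uint8_t ipad[HMAC_BLOCK_SIZE], opad[HMAC_BLOCK_SIZE];

	hmac_prepare_pads(key, sizeof(key) - 1, ipad, opad);
	printf("i_key_pad[0]=0x%02x o_key_pad[0]=0x%02x\n", ipad[0], opad[0]);
	return 0;
}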