diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f9a7d5ba34..1d93b8e3e2 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -41,6 +41,8 @@
 
 #include "rte_aesni_mb_pmd_private.h"
 
+static uint8_t cryptodev_driver_id;
+
 typedef void (*hash_one_block_t)(const void *data, void *digest);
 typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys,
 		void *dec_exp_keys);
@@ -167,7 +169,7 @@
 		break;
 	default:
 		MB_LOG_ERR("Unsupported authentication algorithm selection");
-		return -1;
+		return -ENOTSUP;
 	}
 
 	/* Calculate Authentication precomputes */
@@ -195,7 +197,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 
 	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		MB_LOG_ERR("Crypto xform struct not of type cipher");
-		return -1;
+		return -EINVAL;
 	}
 
 	/* Select cipher direction */
@@ -207,8 +209,8 @@
 		sess->cipher.direction = DECRYPT;
 		break;
 	default:
-		MB_LOG_ERR("Unsupported cipher operation parameter");
-		return -1;
+		MB_LOG_ERR("Invalid cipher operation parameter");
+		return -EINVAL;
 	}
 
 	/* Select cipher mode */
@@ -224,7 +226,7 @@
 		break;
 	default:
 		MB_LOG_ERR("Unsupported cipher mode parameter");
-		return -1;
+		return -ENOTSUP;
 	}
 
 	/* Check key length and choose key expansion function */
@@ -242,10 +244,14 @@
 		aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
 		break;
 	default:
-		MB_LOG_ERR("Unsupported cipher key length");
-		return -1;
+		MB_LOG_ERR("Invalid cipher key length");
+		return -EINVAL;
 	}
 
+	/* Set IV parameters */
+	sess->iv.offset = xform->cipher.iv.offset;
+	sess->iv.length = xform->cipher.iv.length;
+
 	/* Expanded cipher keys */
 	(*aes_keyexp_fn)(xform->cipher.key.data,
 			sess->cipher.expanded_aes_keys.encode,
@@ -262,6 +268,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 {
 	const struct rte_crypto_sym_xform *auth_xform = NULL;
 	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	int ret;
 
 	/* Select Crypto operation - hash then cipher / cipher then hash */
 	switch (aesni_mb_get_chain_order(xform)) {
@@ -297,19 +304,25 @@
 	case AESNI_MB_OP_NOT_SUPPORTED:
 	default:
 		MB_LOG_ERR("Unsupported operation chain order parameter");
-		return -1;
+		return -ENOTSUP;
 	}
 
-	if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+
+	ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
+	if (ret != 0) {
 		MB_LOG_ERR("Invalid/unsupported authentication parameters");
-		return -1;
+		return ret;
 	}
 
-	if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
-			cipher_xform)) {
+	ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+			cipher_xform);
+	if (ret != 0) {
 		MB_LOG_ERR("Invalid/unsupported cipher parameters");
-		return -1;
+		return ret;
 	}
+
 	return 0;
 }
 
@@ -345,30 +358,38 @@
 get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
 {
 	struct aesni_mb_session *sess = NULL;
 
-	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
-		if (unlikely(op->sym->session->dev_type !=
-				RTE_CRYPTODEV_AESNI_MB_PMD)) {
-			return NULL;
-		}
-
-		sess = (struct aesni_mb_session *)op->sym->session->_private;
-	} else {
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (likely(op->sym->session != NULL))
+			sess = (struct aesni_mb_session *)
+					get_session_private_data(
+					op->sym->session,
+					cryptodev_driver_id);
+	} else {
 		void *_sess = NULL;
+		void *_sess_private_data = NULL;
 
 		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
 			return NULL;
 
-		sess = (struct aesni_mb_session *)
-			((struct rte_cryptodev_sym_session *)_sess)->_private;
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+			return NULL;
+
+		sess = (struct aesni_mb_session *)_sess_private_data;
 
 		if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
 				sess, op->sym->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
 			sess = NULL;
 		}
 		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(op->sym->session, cryptodev_driver_id,
+			_sess_private_data);
 	}
 
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
 	return sess;
 }
 
@@ -386,7 +407,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
  */
 static inline int
 set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
-		struct rte_crypto_op *op)
+		struct rte_crypto_op *op, uint8_t *digest_idx)
 {
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
 	struct aesni_mb_session *session;
@@ -397,7 +418,6 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 		return -1;
 	}
-	op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
 
 	/* Set crypto operation */
 	job->chain_order = session->chain_order;
@@ -446,19 +466,8 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 	/* Set digest output location */
 	if (job->hash_alg != NULL_HASH &&
 			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
-		job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
-				get_digest_byte_length(job->hash_alg));
-
-		if (job->auth_tag_output == NULL) {
-			MB_LOG_ERR("failed to allocate space in output mbuf "
-					"for temp digest");
-			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			return -1;
-		}
-
-		memset(job->auth_tag_output, 0,
-				sizeof(get_digest_byte_length(job->hash_alg)));
-
+		job->auth_tag_output = qp->temp_digests[*digest_idx];
+		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
 	} else {
 		job->auth_tag_output = op->sym->auth.digest.data;
 	}
@@ -471,8 +480,9 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 			get_truncated_digest_byte_length(job->hash_alg);
 
 	/* Set IV parameters */
-	job->iv = op->sym->cipher.iv.data;
-	job->iv_len_in_bytes = op->sym->cipher.iv.length;
+	job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset);
+	job->iv_len_in_bytes = session->iv.length;
 
 	/* Data Parameter */
 	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
@@ -486,53 +496,46 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 
 	/* Set user data to be crypto operation data struct */
 	job->user_data = op;
-	job->user_data2 = m_dst;
 
 	return 0;
 }
 
 static inline void
-verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
-	struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
-
+verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
+		struct rte_crypto_op *op) {
 	/* Verify digest if required */
 	if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
 			job->auth_tag_output_len_in_bytes) != 0)
 		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
-	/* trim area used for digest from mbuf */
-	rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
 }
 
 /**
  * Process a completed job and return rte_mbuf which job processed
  *
+ * @param qp		Queue Pair to process
  * @param job	JOB_AES_HMAC job to process
  *
  * @return
- * - Returns processed mbuf which is trimmed of output digest used in
- * verification of supplied digest in the case of a HASH_CIPHER operation
+ * - Returns processed crypto operation.
 * - Returns NULL on invalid job
  */
 static inline struct rte_crypto_op *
 post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 {
 	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+	struct aesni_mb_session *sess = get_session_private_data(
+			op->sym->session,
+			cryptodev_driver_id);
 
-	struct aesni_mb_session *sess;
-
-	if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
 		switch (job->status) {
 		case STS_COMPLETED:
 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
 			if (job->hash_alg != NULL_HASH) {
-				sess = (struct aesni_mb_session *)
-						op->sym->session->_private;
-
 				if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
-					verify_digest(job, op);
+					verify_digest(qp, job, op);
 			}
 			break;
 		default:
@@ -541,7 +544,11 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 	}
 
 	/* Free session if a session-less crypto op */
-	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_mb_session));
+		memset(op->sym->session, 0,
+				rte_cryptodev_get_header_session_size());
+		rte_mempool_put(qp->sess_mp, sess);
 		rte_mempool_put(qp->sess_mp, op->sym->session);
 		op->sym->session = NULL;
 	}
@@ -566,7 +573,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
 	struct rte_crypto_op *op = NULL;
 	unsigned processed_jobs = 0;
 
-	while (job != NULL && processed_jobs < nb_ops) {
+	while (job != NULL) {
 		op = post_process_mb_job(qp, job);
 
 		if (op) {
@@ -576,6 +583,8 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
 			qp->stats.dequeue_err_count++;
 			break;
 		}
 
+		if (processed_jobs == nb_ops)
+			break;
 		job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
 	}
 
@@ -621,6 +630,10 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 
 	int retval, processed_jobs = 0;
 
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	uint8_t digest_idx = qp->digest_idx;
 	do {
 		/* Get next operation to process from ingress queue */
 		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
@@ -638,7 +651,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
 		}
 
-		retval = set_mb_job_params(job, qp, op);
+		retval = set_mb_job_params(job, qp, op, &digest_idx);
 		if (unlikely(retval != 0)) {
 			qp->stats.dequeue_err_count++;
 			set_job_null_op(job);
@@ -658,6 +671,8 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 
 	} while (processed_jobs < nb_ops);
 
+	qp->digest_idx = digest_idx;
+
 	if (processed_jobs < 1)
 		processed_jobs += flush_mb_mgr(qp,
 				&ops[processed_jobs],
@@ -688,12 +703,8 @@ cryptodev_aesni_mb_create(const char *name,
 		vector_mode = RTE_AESNI_MB_AVX2;
 	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
 		vector_mode = RTE_AESNI_MB_AVX;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+	else
 		vector_mode = RTE_AESNI_MB_SSE;
-	else {
-		MB_LOG_ERR("Vector instructions are not supported by CPU");
-		return -EFAULT;
-	}
 
 	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
 			sizeof(struct aesni_mb_private), init_params->socket_id,
@@ -703,7 +714,7 @@ cryptodev_aesni_mb_create(const char *name,
 		goto init_error;
 	}
 
-	dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
+	dev->driver_id = cryptodev_driver_id;
 	dev->dev_ops = rte_aesni_mb_pmd_ops;
 
 	/* register rx/tx burst functions for data path */
@@ -798,9 +809,14 @@ static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
 	.remove = cryptodev_aesni_mb_remove
 };
 
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
 	"max_nb_queue_pairs=<int> "
 	"max_nb_sessions=<int> "
 	"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+		cryptodev_aesni_mb_pmd_drv,
+		cryptodev_driver_id);