/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
{
const struct rte_crypto_sym_xform *auth_xform;
const struct rte_crypto_sym_xform *cipher_xform;
+ uint16_t digest_length;
+ uint8_t key_length;
+ uint8_t *key;
- if (xform->next == NULL || xform->next->next != NULL) {
- GCM_LOG_ERR("Two and only two chained xform required");
- return -EINVAL;
- }
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
- auth_xform = xform->next;
- cipher_xform = xform;
- } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* AES-GMAC */
+ if (xform->next == NULL) {
auth_xform = xform;
- cipher_xform = xform->next;
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+ GCM_LOG_ERR("Only AES GMAC is supported as an "
+ "authentication only algorithm");
+ return -EINVAL;
+ }
+ /* Set IV parameters */
+ sess->iv.offset = auth_xform->auth.iv.offset;
+ sess->iv.length = auth_xform->auth.iv.length;
+
+ /* Select Crypto operation */
+ if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GMAC_OP_GENERATE;
+ else
+ sess->op = AESNI_GMAC_OP_VERIFY;
+
+ key_length = auth_xform->auth.key.length;
+ key = auth_xform->auth.key.data;
+ /* AES-GCM */
} else {
- GCM_LOG_ERR("Cipher and auth xform required");
- return -EINVAL;
- }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = xform->next;
+ cipher_xform = xform;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ } else {
+ GCM_LOG_ERR("Cipher and auth xform required "
+ "when using AES GCM");
+ return -EINVAL;
+ }
- if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
- auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
- GCM_LOG_ERR("We only support AES GCM and AES GMAC");
- return -EINVAL;
+ if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM))) {
+ GCM_LOG_ERR("The only combined operation "
+ "supported is AES GCM");
+ return -EINVAL;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = cipher_xform->cipher.iv.offset;
+ sess->iv.length = cipher_xform->cipher.iv.length;
+
+ /* Select Crypto operation */
+ if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+ else {
+ GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
+ " Decrypt/Verify are valid only");
+ return -EINVAL;
+ }
+
+ key_length = cipher_xform->cipher.key.length;
+ key = cipher_xform->cipher.key.data;
+
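+ /*
+ * AAD length is now fixed per session; the per-op aad field
+ * only supplies the data pointer.
+ */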
+ sess->aad_length = auth_xform->auth.add_auth_data_length;
}
- /* Select Crypto operation */
- if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
- auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
- sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
- else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
- sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
- else {
- GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
- " Decrypt/Verify are valid only");
+ /* IV check */
+ if (sess->iv.length != 16 && sess->iv.length != 12 &&
+ sess->iv.length != 0) {
+ GCM_LOG_ERR("Wrong IV length");
return -EINVAL;
}
+ digest_length = auth_xform->auth.digest_length;
+
/* Check key length and calculate GCM pre-compute. */
- switch (cipher_xform->cipher.key.length) {
+ switch (key_length) {
case 16:
- aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ aesni_gcm128_pre(key, &sess->gdata);
sess->key = AESNI_GCM_KEY_128;
break;
case 32:
- aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ aesni_gcm256_pre(key, &sess->gdata);
sess->key = AESNI_GCM_KEY_256;
break;
default:
- GCM_LOG_ERR("Unsupported cipher key length");
+ GCM_LOG_ERR("Unsupported cipher/auth key length");
return -EINVAL;
}
+ /* Digest check */
+ if (digest_length != 16 &&
+ digest_length != 12 &&
+ digest_length != 8) {
+ GCM_LOG_ERR("digest");
+ return -EINVAL;
+ }
+ sess->digest_length = digest_length;
+
return 0;
}
/** Get gcm session */
static struct aesni_gcm_session *
-aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
struct aesni_gcm_session *sess = NULL;
+ struct rte_crypto_sym_op *sym_op = op->sym;
- if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->session->dev_type
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(sym_op->session->dev_type
!= RTE_CRYPTODEV_AESNI_GCM_PMD))
return sess;
- sess = (struct aesni_gcm_session *)op->session->_private;
+ sess = (struct aesni_gcm_session *)sym_op->session->_private;
} else {
void *_sess;

if (rte_mempool_get(qp->sess_mp, &_sess))
return sess;

sess = (struct aesni_gcm_session *)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_gcm_set_session_parameters(sess,
- op->xform) != 0)) {
+ sym_op->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
*
*/
static int
-process_gcm_crypto_op(struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct rte_crypto_op *op,
struct aesni_gcm_session *session)
{
uint8_t *src, *dst;
- struct rte_mbuf *m_src = op->m_src;
- uint32_t offset = op->cipher.data.offset;
+ uint8_t *iv_ptr;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_mbuf *m_src = sym_op->m_src;
+ uint32_t offset, data_offset, data_length;
uint32_t part_len, total_len, data_len;
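+ /*
+ * GCM operations take their data offset/length from the cipher
+ * fields of the op; GMAC (authentication only) takes them from
+ * the auth fields.
+ */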
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
+ session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ offset = sym_op->cipher.data.offset;
+ data_offset = offset;
+ data_length = sym_op->cipher.data.length;
+ } else {
+ offset = sym_op->auth.data.offset;
+ data_offset = offset;
+ data_length = sym_op->auth.data.length;
+ }
+
RTE_ASSERT(m_src != NULL);
while (offset >= m_src->data_len) {
offset -= m_src->data_len;
m_src = m_src->next;

RTE_ASSERT(m_src != NULL);
}
data_len = m_src->data_len - offset;
- part_len = (data_len < op->cipher.data.length) ? data_len :
- op->cipher.data.length;
+ part_len = (data_len < data_length) ? data_len :
+ data_length;
/* Destination buffer is required when segmented source buffer */
- RTE_ASSERT((part_len == op->cipher.data.length) ||
- ((part_len != op->cipher.data.length) &&
- (op->m_dst != NULL)));
+ RTE_ASSERT((part_len == data_length) ||
+ ((part_len != data_length) &&
+ (sym_op->m_dst != NULL)));
/* Segmented destination buffer is not supported */
- RTE_ASSERT((op->m_dst == NULL) ||
- ((op->m_dst != NULL) &&
- rte_pktmbuf_is_contiguous(op->m_dst)));
+ RTE_ASSERT((sym_op->m_dst == NULL) ||
+ ((sym_op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(sym_op->m_dst)));
- dst = op->m_dst ?
- rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
- op->cipher.data.offset) :
- rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
- op->cipher.data.offset);
+ dst = sym_op->m_dst ?
+ rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+ data_offset) :
+ rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ data_offset);
src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
- /* sanity checks */
- if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
- op->cipher.iv.length != 0) {
- GCM_LOG_ERR("iv");
- return -1;
- }
-
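+ /* The IV lives inside the crypto op, at the offset configured in the session */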
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
/*
* GCM working in 12B IV mode => 16B pre-counter block we need
* to set BE LSB to 1, driver expects that 16B is allocated
*/
- if (op->cipher.iv.length == 12) {
- uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
+ if (session->iv.length == 12) {
+ uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
*iv_padd = rte_bswap32(1);
}
- if (op->auth.digest.length != 16 &&
- op->auth.digest.length != 12 &&
- op->auth.digest.length != 8) {
- GCM_LOG_ERR("digest");
- return -1;
- }
-
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
aesni_gcm_enc[session->key].init(&session->gdata,
- op->cipher.iv.data,
- op->auth.aad.data,
- (uint64_t)op->auth.aad.length);
+ iv_ptr,
+ sym_op->auth.aad.data,
+ (uint64_t)session->aad_length);
aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
(uint64_t)part_len);
- total_len = op->cipher.data.length - part_len;
+ total_len = data_length - part_len;
while (total_len) {
dst += part_len;
}
aesni_gcm_enc[session->key].finalize(&session->gdata,
- op->auth.digest.data,
- (uint64_t)op->auth.digest.length);
- } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
- op->m_dst : op->m_src,
- op->auth.digest.length);
+ sym_op->auth.digest.data,
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src,
+ session->digest_length);
if (!auth_tag) {
GCM_LOG_ERR("auth_tag");
return -1;
}
aesni_gcm_dec[session->key].init(&session->gdata,
- op->cipher.iv.data,
- op->auth.aad.data,
- (uint64_t)op->auth.aad.length);
+ iv_ptr,
+ sym_op->auth.aad.data,
+ (uint64_t)session->aad_length);
aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
(uint64_t)part_len);
- total_len = op->cipher.data.length - part_len;
+ total_len = data_length - part_len;
while (total_len) {
dst += part_len;
aesni_gcm_dec[session->key].finalize(&session->gdata,
auth_tag,
- (uint64_t)op->auth.digest.length);
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GMAC_OP_GENERATE) {
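+ /*
+ * GMAC: the message is authenticated only, so it is passed to
+ * the GCM init call in place of the AAD and no data is ciphered.
+ */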
+ aesni_gcm_enc[session->key].init(&session->gdata,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+ aesni_gcm_enc[session->key].finalize(&session->gdata,
+ sym_op->auth.digest.data,
+ (uint64_t)session->digest_length);
+ } else { /* AESNI_GMAC_OP_VERIFY */
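+ /*
+ * Append room for the computed tag; post-processing compares it
+ * against the supplied digest and then trims it from the mbuf.
+ */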
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src,
+ session->digest_length);
+
+ if (!auth_tag) {
+ GCM_LOG_ERR("auth_tag");
+ return -1;
+ }
+
+ aesni_gcm_dec[session->key].init(&session->gdata,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+
+ aesni_gcm_dec[session->key].finalize(&session->gdata,
+ auth_tag,
+ (uint64_t)session->digest_length);
}
return 0;
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Verify digest if required */
- if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
+ session->op == AESNI_GMAC_OP_VERIFY) {
uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
- m->data_len - op->sym->auth.digest.length);
+ m->data_len - session->digest_length);
#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
rte_hexdump(stdout, "auth tag (orig):",
- op->sym->auth.digest.data, op->sym->auth.digest.length);
+ op->sym->auth.digest.data, session->digest_length);
rte_hexdump(stdout, "auth tag (calc):",
- tag, op->sym->auth.digest.length);
+ tag, session->digest_length);
#endif
if (memcmp(tag, op->sym->auth.digest.data,
- op->sym->auth.digest.length) != 0)
+ session->digest_length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+ rte_pktmbuf_trim(m, session->digest_length);
}
}
post_process_gcm_crypto_op(op);
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
for (i = 0; i < nb_dequeued; i++) {
- sess = aesni_gcm_get_session(qp, ops[i]->sym);
+ sess = aesni_gcm_get_session(qp, ops[i]);
if (unlikely(sess == NULL)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.dequeue_err_count++;
break;
}
- retval = process_gcm_crypto_op(ops[i]->sym, sess);
+ retval = process_gcm_crypto_op(ops[i], sess);
if (retval < 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.dequeue_err_count++;
return -EFAULT;
}
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct aesni_gcm_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct aesni_gcm_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
GCM_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);