* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <openssl/aes.h>
-
#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include "aesni_gcm_pmd_private.h"
-static int
-aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
- uint8_t *aeskey, unsigned aeskey_length)
-{
- uint8_t key[aeskey_length] __rte_aligned(16);
- AES_KEY enc_key;
-
- if (hsubkey_length % 16 != 0 && aeskey_length % 16 != 0)
- return -EFAULT;
-
- memcpy(key, aeskey, aeskey_length);
-
- if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
- return -EFAULT;
-
- AES_encrypt(hsubkey, hsubkey, &enc_key);
-
- return 0;
-}
-
-/** Get xform chain order */
-static int
-aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
-{
- /*
- * GCM only supports authenticated encryption or authenticated
- * decryption, all other options are invalid, so we must have exactly
- * 2 xform structs chained together
- */
- if (xform->next == NULL || xform->next->next != NULL)
- return -1;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
- return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
- }
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
- return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
- }
+/** GCM encryption function pointer table */
+static const struct aesni_gcm_ops aesni_gcm_enc[] = {
+ [AESNI_GCM_KEY_128] = {
+ aesni_gcm128_init,
+ aesni_gcm128_enc_update,
+ aesni_gcm128_enc_finalize
+ },
+ [AESNI_GCM_KEY_256] = {
+ aesni_gcm256_init,
+ aesni_gcm256_enc_update,
+ aesni_gcm256_enc_finalize
+ }
+};
- return -1;
-}
+/** GCM decryption function pointer table */
+static const struct aesni_gcm_ops aesni_gcm_dec[] = {
+ [AESNI_GCM_KEY_128] = {
+ aesni_gcm128_init,
+ aesni_gcm128_dec_update,
+ aesni_gcm128_dec_finalize
+ },
+ [AESNI_GCM_KEY_256] = {
+ aesni_gcm256_init,
+ aesni_gcm256_dec_update,
+ aesni_gcm256_dec_finalize
+ }
+};
/** Parse crypto xform chain and set private session parameters */
int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
- struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform;
+ const struct rte_crypto_sym_xform *cipher_xform;
- uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
-
- /* Select Crypto operation - hash then cipher / cipher then hash */
- switch (aesni_gcm_get_mode(xform)) {
- case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
- sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ if (xform->next == NULL || xform->next->next != NULL) {
+ GCM_LOG_ERR("Two and only two chained xform required");
+ return -EINVAL;
+ }
- cipher_xform = xform;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform->next;
- break;
- case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
- sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-
+ cipher_xform = xform;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
auth_xform = xform;
cipher_xform = xform->next;
- break;
- default:
- GCM_LOG_ERR("Unsupported operation chain order parameter");
+ } else {
+ GCM_LOG_ERR("Cipher and auth xform required");
return -EINVAL;
}
- /* We only support AES GCM */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM &&
- auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+ if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
+ auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
+ GCM_LOG_ERR("We only support AES GCM and AES GMAC");
return -EINVAL;
+ }
- /* Select cipher direction */
- if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
- cipher_xform->cipher.op !=
- RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
- "(DECRYPT) specified are an invalid selection");
- return -EINVAL;
- } else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
- cipher_xform->cipher.op !=
- RTE_CRYPTO_CIPHER_OP_DECRYPT) {
- GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
- "(ENCRYPT) specified are an invalid selection");
+ /* Select Crypto operation */
+ if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+ else {
+ GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
+ " Decrypt/Verify are valid only");
return -EINVAL;
}
- /* Expand GCM AES128 key */
- (*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
- sess->gdata.expanded_keys);
+ /* Check key length and compute the GCM pre-computed data */
+ switch (cipher_xform->cipher.key.length) {
+ case 16:
+ aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ sess->key = AESNI_GCM_KEY_128;
- /* Calculate hash sub key here */
- aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
- cipher_xform->cipher.key.data,
- cipher_xform->cipher.key.length);
+ break;
+ case 32:
+ aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ sess->key = AESNI_GCM_KEY_256;
- /* Calculate GCM pre-compute */
- (*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+ break;
+ default:
+ GCM_LOG_ERR("Unsupported cipher key length");
+ return -EINVAL;
+ }
return 0;
}
return sess;
sess = (struct aesni_gcm_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
- if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
- sess, op->xform) != 0)) {
+ if (unlikely(aesni_gcm_set_session_parameters(sess,
+ op->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
*
*/
static int
-process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct rte_crypto_sym_op *op,
struct aesni_gcm_session *session)
{
uint8_t *src, *dst;
- struct rte_mbuf *m = op->m_src;
+ struct rte_mbuf *m_src = op->m_src;
+ uint32_t offset = op->cipher.data.offset;
+ uint32_t part_len, total_len, data_len;
+
+ RTE_ASSERT(m_src != NULL);
+
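+ /* Find the segment that contains the start of the cipher data */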
+ while (offset >= m_src->data_len) {
+ offset -= m_src->data_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+ }
+
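+ /* Data available in the first segment, capped at the cipher length */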
+ data_len = m_src->data_len - offset;
+ part_len = (data_len < op->cipher.data.length) ? data_len :
+ op->cipher.data.length;
+
+ /* A destination buffer is required when the source buffer is segmented */
+ RTE_ASSERT((part_len == op->cipher.data.length) ||
+ ((part_len != op->cipher.data.length) &&
+ (op->m_dst != NULL)));
+ /* Segmented destination buffer is not supported */
+ RTE_ASSERT((op->m_dst == NULL) ||
+ ((op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(op->m_dst)));
+
- src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
dst = op->m_dst ?
rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
op->cipher.data.offset) :
- rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
op->cipher.data.offset);
+ src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
/* sanity checks */
if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
op->cipher.iv.length != 0) {
*iv_padd = rte_bswap32(1);
}
- if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
- op->auth.aad.length != 0) {
- GCM_LOG_ERR("iv");
- return -1;
- }
-
if (op->auth.digest.length != 16 &&
op->auth.digest.length != 12 &&
- op->auth.digest.length != 8 &&
- op->auth.digest.length != 0) {
- GCM_LOG_ERR("iv");
+ op->auth.digest.length != 8) {
+ GCM_LOG_ERR("digest");
return -1;
}
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
- (*qp->ops->gcm.enc)(&session->gdata, dst, src,
- (uint64_t)op->cipher.data.length,
+ aesni_gcm_enc[session->key].init(&session->gdata,
op->cipher.iv.data,
op->auth.aad.data,
- (uint64_t)op->auth.aad.length,
+ (uint64_t)op->auth.aad.length);
+
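+ /* Encrypt the portion of the data held in the first segment */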
+ aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+ (uint64_t)part_len);
+ total_len = op->cipher.data.length - part_len;
+
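+ /* Walk the remaining segments, encrypting one segment at a time */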
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ aesni_gcm_enc[session->key].update(&session->gdata,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
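+ /* Generate the authentication tag into the digest buffer */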
+ aesni_gcm_enc[session->key].finalize(&session->gdata,
op->auth.digest.data,
(uint64_t)op->auth.digest.length);
- } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+ } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
+ op->m_dst : op->m_src,
op->auth.digest.length);
if (!auth_tag) {
- GCM_LOG_ERR("iv");
+ GCM_LOG_ERR("auth_tag");
return -1;
}
- (*qp->ops->gcm.dec)(&session->gdata, dst, src,
- (uint64_t)op->cipher.data.length,
+ aesni_gcm_dec[session->key].init(&session->gdata,
op->cipher.iv.data,
op->auth.aad.data,
- (uint64_t)op->auth.aad.length,
+ (uint64_t)op->auth.aad.length);
+
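+ /* Decrypt the portion of the data held in the first segment */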
+ aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+ (uint64_t)part_len);
+ total_len = op->cipher.data.length - part_len;
+
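+ /* Walk the remaining segments, decrypting one segment at a time */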
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ aesni_gcm_dec[session->key].update(&session->gdata,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
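+ /* Compute the tag into the temporary buffer appended to the mbuf */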
+ aesni_gcm_dec[session->key].finalize(&session->gdata,
auth_tag,
(uint64_t)op->auth.digest.length);
- } else {
- GCM_LOG_ERR("iv");
- return -1;
}
return 0;
break;
}
-#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- GCM_LOG_ERR("PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.\n", ops[i]);
- qp->qp_stats.enqueue_err_count++;
- break;
- }
-#endif
-
- retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+ retval = process_gcm_crypto_op(ops[i]->sym, sess);
if (retval < 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.enqueue_err_count++;
{
struct rte_cryptodev *dev;
struct aesni_gcm_private *internals;
- enum aesni_gcm_vector_mode vector_mode;
if (init_params->name[0] == '\0') {
int ret = rte_cryptodev_pmd_create_dev_name(
return -EFAULT;
}
- /* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
- vector_mode = RTE_AESNI_GCM_AVX2;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
- vector_mode = RTE_AESNI_GCM_AVX;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- vector_mode = RTE_AESNI_GCM_SSE;
- else {
- GCM_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
-
dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct aesni_gcm_private), init_params->socket_id);
if (dev == NULL) {
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_CPU_AESNI;
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
- switch (vector_mode) {
- case RTE_AESNI_GCM_SSE:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- break;
- case RTE_AESNI_GCM_AVX:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
- break;
- case RTE_AESNI_GCM_AVX2:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
- break;
- default:
- break;
- }
-
- /* Set vector instructions mode supported */
internals = dev->data->dev_private;
- internals->vector_mode = vector_mode;
-
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;