# - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2)
# - DPDK_DEP_ARCHIVE
# - DPDK_DEP_CFLAGS
-# - DPDK_DEP_ISAL_CRYPTO (y/[n])
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MOFED (y/[n])
# - DPDK_DEP_NUMA ([y]/n)
unset CROSS
unset DPDK_DEP_ARCHIVE
unset DPDK_DEP_CFLAGS
- unset DPDK_DEP_ISAL_CRYPTO
unset DPDK_DEP_LDFLAGS
unset DPDK_DEP_MOFED
unset DPDK_DEP_NUMA
sed -ri 's,(PMD_ARMV8_CRYPTO=)n,\1y,' $1/.config
test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
sed -ri 's,(PMD_AESNI_MB=)n,\1y,' $1/.config
- test "$DPDK_DEP_ISAL_CRYPTO" != y || \
+ test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
sed -ri 's,(PMD_AESNI_GCM=)n,\1y,' $1/.config
test -z "$LIBSSO_SNOW3G_PATH" || \
sed -ri 's,(PMD_SNOW3G=)n,\1y,' $1/.config
.. BSD LICENSE
- Copyright(c) 2016 Intel Corporation. All rights reserved.
+ Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
The AES-NI GCM PMD (**librte_pmd_aesni_gcm**) provides poll mode crypto driver
-support for utilizing Intel ISA-L crypto library, which provides operation acceleration
-through the AES-NI instruction sets for AES-GCM authenticated cipher algorithm.
+support for utilizing the Intel Multi-Buffer library (see the AES-NI Multi-buffer PMD
+documentation to learn more about it, including installation instructions).
Features
--------
* RTE_CRYPTO_AEAD_AES_GCM
+Limitations
+-----------
+
+* Chained mbufs are supported but only out-of-place (destination mbuf must be contiguous).
+* Cipher only is not supported.
+
+
Installation
------------
-To build DPDK with the AESNI_GCM_PMD the user is required to install
-the ``libisal_crypto`` library in the build environment.
-For download and more details please visit `<https://github.com/01org/isa-l_crypto>`_.
+To build DPDK with the AESNI_GCM_PMD, the user is required to download the multi-buffer
+library from `here <https://github.com/01org/intel-ipsec-mb>`_
+and compile it on their system before building DPDK.
+The latest version of the library supported by this PMD is v0.46, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.46.zip>`_.
+
+.. code-block:: console
+
+ make
+
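+For example, a complete sequence might look as follows (a sketch; the extracted
+directory name is an assumption based on the archive layout):
+
+.. code-block:: console
+
+    wget https://github.com/01org/intel-ipsec-mb/archive/v0.46.zip
+    unzip v0.46.zip
+    cd intel-ipsec-mb-0.46
+    make
+    export AESNI_MULTI_BUFFER_LIB_PATH=$(pwd)
+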
+As a reference, the following table shows a mapping between past DPDK versions
+and the external crypto library versions supported by them:
+
+.. _table_aesni_gcm_versions:
+
+.. table:: DPDK and external crypto library version compatibility
+
+ ============= ================================
+ DPDK version Crypto library version
+ ============= ================================
+ 16.04 - 16.11 Multi-buffer library 0.43 - 0.44
+ 17.02 - 17.05 ISA-L Crypto v2.18
+ 17.08+ Multi-buffer library 0.46+
+ ============= ================================
+
Initialization
--------------
In order to enable this virtual crypto PMD, the user must:
-* Install the ISA-L crypto library (explained in Installation section).
+* Export the environment variable AESNI_MULTI_BUFFER_LIB_PATH with the path where
+  the library was extracted.
+
+* Build the multi buffer library (explained in Installation section).
* Set CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=y in config/common_base.
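
A minimal sketch of these steps (the library path is illustrative; the sed
invocation mirrors the build-test script above and is one way to set the option):

.. code-block:: console

    export AESNI_MULTI_BUFFER_LIB_PATH=/path/to/intel-ipsec-mb-0.46
    sed -ri 's,(PMD_AESNI_GCM=)n,\1y,' config/common_base

For example, the PMD virtual device can then be created when running the
l2fwd-crypto application:
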
.. code-block:: console
./l2fwd-crypto -l 6 -n 4 --vdev="crypto_aesni_gcm,socket_id=1,max_nb_sessions=128"
-
-Limitations
------------
-
-* Chained mbufs are supported but only out-of-place (destination mbuf must be contiguous).
-* Hash only is not supported.
-* Cipher only is not supported.
Symmetric crypto = Y
Sym operation chaining = Y
CPU AESNI = Y
-
+CPU SSE = Y
+CPU AVX = Y
+CPU AVX2 = Y
;
; Supported crypto algorithms of the 'aesni_gcm' crypto driver.
;
* 12-byte IV on AES Counter Mode, apart from the previous 16-byte IV.
+* **Updated the AES-NI GCM PMD.**
+
+  The AES-NI GCM PMD was migrated from the ISA-L crypto library to the Multi Buffer
+  library, as the latter now has Scatter Gather List support. The migration
+  entailed adding support for:
+
+ * 192-bit key.
+
Resolved Issues
---------------
# BSD LICENSE
#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
include $(RTE_SDK)/mk/rte.vars.mk
ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
endif
# library name
EXPORT_MAP := rte_pmd_aesni_gcm_version.map
# external library dependencies
-LDLIBS += -lisal_crypto
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#define LINUX
#endif
-#include <isa-l_crypto/aes_gcm.h>
+#include <gcm_defines.h>
+#include <aux_funcs.h>
-typedef void (*aesni_gcm_init_t)(struct gcm_data *my_ctx_data,
- uint8_t *iv,
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+ RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+ RTE_AESNI_GCM_SSE,
+ RTE_AESNI_GCM_AVX,
+ RTE_AESNI_GCM_AVX2,
+ RTE_AESNI_GCM_VECTOR_NUM
+};
+
+enum aesni_gcm_key {
+ AESNI_GCM_KEY_128,
+ AESNI_GCM_KEY_192,
+ AESNI_GCM_KEY_256,
+ AESNI_GCM_KEY_NUM
+};
+
+
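+/** Single-shot GCM encrypt/decrypt function prototype */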
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data, uint8_t *out,
+ const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
+ const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
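+/** GCM key schedule and hash key pre-computation function prototype */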
+typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ const uint8_t *iv,
uint8_t const *aad,
uint64_t aad_len);
-typedef void (*aesni_gcm_update_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
uint8_t *out,
const uint8_t *in,
uint64_t plaintext_len);
-typedef void (*aesni_gcm_finalize_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
uint8_t *auth_tag,
uint64_t auth_tag_len);
+/** GCM library function pointer table */
struct aesni_gcm_ops {
+ aesni_gcm_t enc; /**< GCM encode function pointer */
+ aesni_gcm_t dec; /**< GCM decode function pointer */
+ aesni_gcm_precomp_t precomp; /**< GCM pre-compute */
aesni_gcm_init_t init;
- aesni_gcm_update_t update;
+ aesni_gcm_update_t update_enc;
+ aesni_gcm_update_t update_dec;
aesni_gcm_finalize_t finalize;
};
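+
+/** Expands to an initializer list of GCM function pointers for a given
+ * key length and arch suffix, in struct aesni_gcm_ops field order */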
+#define AES_GCM_FN(keylen, arch) \
+aes_gcm_enc_##keylen##_##arch,\
+aes_gcm_dec_##keylen##_##arch,\
+aes_gcm_pre_##keylen##_##arch,\
+aes_gcm_init_##keylen##_##arch,\
+aes_gcm_enc_##keylen##_update_##arch,\
+aes_gcm_dec_##keylen##_update_##arch,\
+aes_gcm_enc_##keylen##_finalize_##arch,
+
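+/** Per-vector-mode, per-key-size table of GCM function pointers */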
+static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
+ [RTE_AESNI_GCM_NOT_SUPPORTED] = {
+ [AESNI_GCM_KEY_128] = {NULL},
+ [AESNI_GCM_KEY_192] = {NULL},
+ [AESNI_GCM_KEY_256] = {NULL}
+ },
+ [RTE_AESNI_GCM_SSE] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, sse)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, sse)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, sse)
+ }
+ },
+ [RTE_AESNI_GCM_AVX] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen2)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen2)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen2)
+ }
+ },
+ [RTE_AESNI_GCM_AVX2] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen4)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen4)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen4)
+ }
+ }
+};
#endif /* _AESNI_GCM_OPS_H_ */
#include "aesni_gcm_pmd_private.h"
-/** GCM encode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_enc[] = {
- [AESNI_GCM_KEY_128] = {
- aesni_gcm128_init,
- aesni_gcm128_enc_update,
- aesni_gcm128_enc_finalize
- },
- [AESNI_GCM_KEY_256] = {
- aesni_gcm256_init,
- aesni_gcm256_enc_update,
- aesni_gcm256_enc_finalize
- }
-};
-
-/** GCM decode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_dec[] = {
- [AESNI_GCM_KEY_128] = {
- aesni_gcm128_init,
- aesni_gcm128_dec_update,
- aesni_gcm128_dec_finalize
- },
- [AESNI_GCM_KEY_256] = {
- aesni_gcm256_init,
- aesni_gcm256_dec_update,
- aesni_gcm256_dec_finalize
- }
-};
/** Parse crypto xform chain and set private session parameters */
int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+ struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform)
{
const struct rte_crypto_sym_xform *auth_xform;
/* Check key length and calculate GCM pre-compute. */
switch (key_length) {
case 16:
- aesni_gcm128_pre(key, &sess->gdata);
sess->key = AESNI_GCM_KEY_128;
-
+ break;
+ case 24:
+ sess->key = AESNI_GCM_KEY_192;
break;
case 32:
- aesni_gcm256_pre(key, &sess->gdata);
sess->key = AESNI_GCM_KEY_256;
-
break;
default:
GCM_LOG_ERR("Unsupported key length");
return -EINVAL;
}
+ gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
/* Digest check */
if (digest_length != 16 &&
digest_length != 12 &&
sess = (struct aesni_gcm_session *)
((struct rte_cryptodev_sym_session *)_sess)->_private;
- if (unlikely(aesni_gcm_set_session_parameters(sess,
+ if (unlikely(aesni_gcm_set_session_parameters(qp->ops, sess,
sym_op->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
/**
- * Process a crypto operation and complete a JOB_AES_HMAC job structure for
- * submission to the multi buffer library for processing.
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
*
* @param qp queue pair
* @param op symmetric crypto operation
*
*/
static int
-process_gcm_crypto_op(struct rte_crypto_op *op,
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
struct aesni_gcm_session *session)
{
uint8_t *src, *dst;
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
- aesni_gcm_enc[session->key].init(&session->gdata,
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
iv_ptr,
sym_op->aead.aad.data,
(uint64_t)session->aad_length);
- aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
total_len = data_length - part_len;
part_len = (m_src->data_len < total_len) ?
m_src->data_len : total_len;
- aesni_gcm_enc[session->key].update(&session->gdata,
- dst, src,
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
total_len -= part_len;
}
- aesni_gcm_enc[session->key].finalize(&session->gdata,
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
sym_op->aead.digest.data,
(uint64_t)session->digest_length);
} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
return -1;
}
- aesni_gcm_dec[session->key].init(&session->gdata,
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
iv_ptr,
sym_op->aead.aad.data,
(uint64_t)session->aad_length);
- aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
total_len = data_length - part_len;
part_len = (m_src->data_len < total_len) ?
m_src->data_len : total_len;
- aesni_gcm_dec[session->key].update(&session->gdata,
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx,
dst, src,
(uint64_t)part_len);
total_len -= part_len;
}
- aesni_gcm_dec[session->key].finalize(&session->gdata,
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
auth_tag,
(uint64_t)session->digest_length);
} else if (session->op == AESNI_GMAC_OP_GENERATE) {
- aesni_gcm_enc[session->key].init(&session->gdata,
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
iv_ptr,
src,
(uint64_t)data_length);
- aesni_gcm_enc[session->key].finalize(&session->gdata,
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
sym_op->auth.digest.data,
(uint64_t)session->digest_length);
} else { /* AESNI_GMAC_OP_VERIFY */
return -1;
}
- aesni_gcm_dec[session->key].init(&session->gdata,
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
iv_ptr,
src,
(uint64_t)data_length);
- aesni_gcm_dec[session->key].finalize(&session->gdata,
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
auth_tag,
(uint64_t)session->digest_length);
}
break;
}
- retval = process_gcm_crypto_op(ops[i], sess);
+ retval = process_gcm_crypto_op(qp, ops[i], sess);
if (retval < 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.dequeue_err_count++;
{
struct rte_cryptodev *dev;
struct aesni_gcm_private *internals;
+ enum aesni_gcm_vector_mode vector_mode;
if (init_params->name[0] == '\0')
snprintf(init_params->name, sizeof(init_params->name),
return -EFAULT;
}
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_GCM_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_GCM_AVX;
+ else
+ vector_mode = RTE_AESNI_GCM_SSE;
+
dev = rte_cryptodev_vdev_pmd_init(init_params->name,
sizeof(struct aesni_gcm_private), init_params->socket_id,
vdev);
RTE_CRYPTODEV_FF_CPU_AESNI |
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ switch (vector_mode) {
+ case RTE_AESNI_GCM_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_GCM_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_GCM_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ default:
+ break;
+ }
+
internals = dev->data->dev_private;
+ internals->vector_mode = vector_mode;
+
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
.key_size = {
.min = 16,
.max = 32,
- .increment = 16
+ .increment = 8
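+ /* 8-byte step: 16, 24 and 32-byte (128/192/256-bit) keys */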
},
.digest_size = {
.min = 8,
.key_size = {
.min = 16,
.max = 32,
- .increment = 16
+ .increment = 8
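+ /* 8-byte step: 16, 24 and 32-byte (128/192/256-bit) keys */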
},
.digest_size = {
.min = 8,
int socket_id)
{
struct aesni_gcm_qp *qp = NULL;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
goto qp_setup_cleanup;
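+ /* Select the GCM function table for the vector mode detected at device creation */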
+ qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+
qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
qp_conf->nb_descriptors, socket_id);
if (qp->processed_pkts == NULL)
/** Configure an aesni gcm session from a crypto xform chain */
static void *
-aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
{
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
if (unlikely(sess == NULL)) {
GCM_LOG_ERR("invalid session struct");
return NULL;
}
- if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
+ if (aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+ sess, xform) != 0) {
GCM_LOG_ERR("failed configure session parameters");
return NULL;
}
/** private data structure for each virtual AESNI GCM device */
struct aesni_gcm_private {
+ enum aesni_gcm_vector_mode vector_mode;
+ /**< Vector mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
};
struct aesni_gcm_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
- /**< Unique Queue Pair Name */
+ const struct aesni_gcm_ops *ops;
+ /**< Architecture dependent function pointer table of the gcm APIs */
struct rte_ring *processed_pkts;
/**< Ring for placing process packets */
+ struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
+ /**< GCM parameters */
+ struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
+ /**< Queue pair statistics */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
- /**< Queue pair statistics */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
} __rte_cache_aligned;
AESNI_GMAC_OP_VERIFY
};
-enum aesni_gcm_key {
- AESNI_GCM_KEY_128,
- AESNI_GCM_KEY_256
-};
-
/** AESNI GCM private session structure */
struct aesni_gcm_session {
struct {
/**< GCM operation type */
enum aesni_gcm_key key;
/**< GCM key type */
- struct gcm_data gdata __rte_cache_aligned;
+ struct gcm_key_data gdata_key;
/**< GCM parameters */
};
* - On failure returns error code < 0
*/
extern int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+ struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform);
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lrte_pmd_aesni_mb
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm -lisal_crypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += -lrte_pmd_openssl -lcrypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += -lrte_pmd_qat -lcrypto