.. SPDX-License-Identifier: BSD-3-Clause
- Copyright(c) 2016-2017 Intel Corporation.
+ Copyright(c) 2016-2019 Intel Corporation.
AES-NI GCM Crypto Poll Mode Driver
==================================
To build DPDK with the AESNI_GCM_PMD, the user is required to download the multi-buffer
library from `here <https://github.com/01org/intel-ipsec-mb>`_
and compile it on their system before building DPDK.
-The latest version of the library supported by this PMD is v0.50, which
-can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.50.zip>`_.
+The latest version of the library supported by this PMD is v0.52, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.52.zip>`_.
.. code-block:: console
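   # Typical build/install flow for the intel-ipsec-mb library
   # (a sketch; adjust paths and install prefix for your environment):
   make
   make install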
============= ================================
DPDK version  Crypto library version
============= ================================
16.04 - 16.11 Multi-buffer library 0.43 - 0.44
17.02 - 17.05 ISA-L Crypto v2.18
17.08 - 18.02 Multi-buffer library 0.46 - 0.48
- 18.05+        Multi-buffer library 0.49+
+ 18.05 - 19.02 Multi-buffer library 0.49 - 0.52
+ 19.05+        Multi-buffer library 0.52
============= ================================
CPU SSE = Y
CPU AVX = Y
CPU AVX2 = Y
+CPU AVX512 = Y
OOP SGL In LB Out = Y
OOP LB In LB Out = Y
;
RTE_AESNI_GCM_SSE,
RTE_AESNI_GCM_AVX,
RTE_AESNI_GCM_AVX2,
+ RTE_AESNI_GCM_AVX512,
RTE_AESNI_GCM_VECTOR_NUM
};
enum aesni_gcm_key {
- AESNI_GCM_KEY_128,
- AESNI_GCM_KEY_192,
- AESNI_GCM_KEY_256,
- AESNI_GCM_KEY_NUM
+ GCM_KEY_128 = 0,
+ GCM_KEY_192,
+ GCM_KEY_256,
+ GCM_KEY_NUM
};
-
typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
struct gcm_context_data *gcm_ctx_data, uint8_t *out,
const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
const uint8_t *aad, uint64_t aad_len,
uint8_t *auth_tag, uint64_t auth_tag_len);
-typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+typedef void (*aesni_gcm_pre_t)(const void *key, struct gcm_key_data *gcm_data);
typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
struct gcm_context_data *gcm_ctx_data,
struct aesni_gcm_ops {
aesni_gcm_t enc; /**< GCM encode function pointer */
aesni_gcm_t dec; /**< GCM decode function pointer */
- aesni_gcm_precomp_t precomp; /**< GCM pre-compute */
+ aesni_gcm_pre_t pre; /**< GCM pre-compute */
aesni_gcm_init_t init;
aesni_gcm_update_t update_enc;
aesni_gcm_update_t update_dec;
- aesni_gcm_finalize_t finalize;
+ aesni_gcm_finalize_t finalize_enc;
+ aesni_gcm_finalize_t finalize_dec;
};
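The ``finalize`` hook is split because the multi-buffer manager exposes separate
``gcm*_enc_finalize`` and ``gcm*_dec_finalize`` entry points per key size (see the
probe hunk below, where the table is populated). As a minimal sketch of how the
incremental hooks compose for segmented input, assuming the v0.52 signatures and
an ``ops`` entry already bound from an initialized ``MB_MGR``:

.. code-block:: c

   #include <stdint.h>
   #include "aesni_gcm_ops.h"   /* struct aesni_gcm_ops and the typedefs above */

   /*
    * Sketch only, not the driver's code: encrypt a two-segment buffer
    * through the incremental hooks. Assumes key material was already
    * precomputed into *key via ops->pre().
    */
   static void
   gcm_encrypt_sgl_sketch(const struct aesni_gcm_ops *ops,
           const struct gcm_key_data *key, struct gcm_context_data *ctx,
           const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
           uint8_t *dst, const uint8_t *seg0, uint64_t len0,
           const uint8_t *seg1, uint64_t len1,
           uint8_t *tag, uint64_t tag_len)
   {
           ops->init(key, ctx, iv, aad, aad_len);      /* start the stream */
           ops->update_enc(key, ctx, dst, seg0, len0); /* first segment */
           ops->update_enc(key, ctx, dst + len0, seg1, len1);
           ops->finalize_enc(key, ctx, tag, tag_len);  /* emit the auth tag */
   }

The one-shot ``enc``/``dec`` hooks cannot consume segmented buffers, which is why
``init``/``update``/``finalize`` are carried separately in the table.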
-#define AES_GCM_FN(keylen, arch) \
-aes_gcm_enc_##keylen##_##arch,\
-aes_gcm_dec_##keylen##_##arch,\
-aes_gcm_pre_##keylen##_##arch,\
-aes_gcm_init_##keylen##_##arch,\
-aes_gcm_enc_##keylen##_update_##arch,\
-aes_gcm_dec_##keylen##_update_##arch,\
-aes_gcm_enc_##keylen##_finalize_##arch,
-
-static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
- [RTE_AESNI_GCM_NOT_SUPPORTED] = {
- [AESNI_GCM_KEY_128] = {NULL},
- [AESNI_GCM_KEY_192] = {NULL},
- [AESNI_GCM_KEY_256] = {NULL}
- },
- [RTE_AESNI_GCM_SSE] = {
- [AESNI_GCM_KEY_128] = {
- AES_GCM_FN(128, sse)
- },
- [AESNI_GCM_KEY_192] = {
- AES_GCM_FN(192, sse)
- },
- [AESNI_GCM_KEY_256] = {
- AES_GCM_FN(256, sse)
- }
- },
- [RTE_AESNI_GCM_AVX] = {
- [AESNI_GCM_KEY_128] = {
- AES_GCM_FN(128, avx_gen2)
- },
- [AESNI_GCM_KEY_192] = {
- AES_GCM_FN(192, avx_gen2)
- },
- [AESNI_GCM_KEY_256] = {
- AES_GCM_FN(256, avx_gen2)
- }
- },
- [RTE_AESNI_GCM_AVX2] = {
- [AESNI_GCM_KEY_128] = {
- AES_GCM_FN(128, avx_gen4)
- },
- [AESNI_GCM_KEY_192] = {
- AES_GCM_FN(192, avx_gen4)
- },
- [AESNI_GCM_KEY_256] = {
- AES_GCM_FN(256, avx_gen4)
- }
- }
-};
#endif /* _AESNI_GCM_OPS_H_ */
/* Check key length and calculate GCM pre-compute. */
switch (key_length) {
case 16:
- sess->key = AESNI_GCM_KEY_128;
+ sess->key = GCM_KEY_128;
break;
case 24:
- sess->key = AESNI_GCM_KEY_192;
+ sess->key = GCM_KEY_192;
break;
case 32:
- sess->key = AESNI_GCM_KEY_256;
+ sess->key = GCM_KEY_256;
break;
default:
AESNI_GCM_LOG(ERR, "Invalid key length");
return -EINVAL;
}
- gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+ gcm_ops[sess->key].pre(key, &sess->gdata_key);
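``pre()`` resolves to the library's per-key-size ``gcm*_pre`` pointers (bound from
the ``MB_MGR`` in the probe hunk below), which expand the AES round keys and
precompute the GHASH subkey into ``struct gcm_key_data``. A standalone sketch
against intel-ipsec-mb v0.52, using only calls that also appear in this patch:

.. code-block:: c

   #include <intel-ipsec-mb.h>

   /* Sketch: per-session key precompute, mirroring what pre() does. */
   static int
   precompute_gcm128_key(const void *aes_key, struct gcm_key_data *gdata_key)
   {
           MB_MGR *mgr = alloc_mb_mgr(0);

           if (mgr == NULL)
                   return -1;
           init_mb_mgr_sse(mgr);  /* binds ISA-specific implementations */
           /* Expands AES round keys and precomputes the GHASH subkey. */
           mgr->gcm128_pre(aes_key, gdata_key);
           free_mb_mgr(mgr);
           return 0;
   }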
/* Digest check */
if (sess->req_digest_length > 16) {
else
tag = sym_op->aead.digest.data;
- qp->ops[session->key].finalize(&session->gdata_key,
+ qp->ops[session->key].finalize_enc(&session->gdata_key,
&qp->gdata_ctx,
tag,
session->gen_digest_length);
}
tag = qp->temp_digest;
- qp->ops[session->key].finalize(&session->gdata_key,
+ qp->ops[session->key].finalize_dec(&session->gdata_key,
&qp->gdata_ctx,
tag,
session->gen_digest_length);
tag = qp->temp_digest;
else
tag = sym_op->auth.digest.data;
- qp->ops[session->key].finalize(&session->gdata_key,
+ qp->ops[session->key].finalize_enc(&session->gdata_key,
&qp->gdata_ctx,
tag,
session->gen_digest_length);
* the bytes passed.
*/
tag = qp->temp_digest;
- qp->ops[session->key].finalize(&session->gdata_key,
+ qp->ops[session->key].finalize_enc(&session->gdata_key,
&qp->gdata_ctx,
tag,
session->gen_digest_length);
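In both verify paths the full 16-byte tag is generated into ``qp->temp_digest``
rather than into the operation's digest buffer; the actual comparison happens in
post-processing, over the requested digest length only. A hedged sketch of that
step (hypothetical helper name; the post-processing function itself is outside
this excerpt):

.. code-block:: c

   #include <string.h>
   #include <rte_crypto.h>

   /* Hypothetical helper sketching the verify comparison. */
   static void
   verify_digest_sketch(struct aesni_gcm_qp *qp,
           struct aesni_gcm_session *session, struct rte_crypto_op *op)
   {
           if (memcmp(qp->temp_digest, op->sym->aead.digest.data,
                           session->req_digest_length) != 0)
                   op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
   }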
struct rte_cryptodev *dev;
struct aesni_gcm_private *internals;
enum aesni_gcm_vector_mode vector_mode;
+ MB_MGR *mb_mgr;
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
}
/* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ vector_mode = RTE_AESNI_GCM_AVX512;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
vector_mode = RTE_AESNI_GCM_AVX2;
else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
vector_mode = RTE_AESNI_GCM_AVX;
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+ mb_mgr = alloc_mb_mgr(0);
+ if (mb_mgr == NULL)
+ return -ENOMEM;
+
switch (vector_mode) {
case RTE_AESNI_GCM_SSE:
dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ init_mb_mgr_sse(mb_mgr);
break;
case RTE_AESNI_GCM_AVX:
dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ init_mb_mgr_avx(mb_mgr);
break;
case RTE_AESNI_GCM_AVX2:
dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ init_mb_mgr_avx2(mb_mgr);
break;
- default:
+ case RTE_AESNI_GCM_AVX512:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+ init_mb_mgr_avx512(mb_mgr);
break;
+ default:
+ AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+ goto error_exit;
}
internals = dev->data->dev_private;
internals->vector_mode = vector_mode;
+ internals->mb_mgr = mb_mgr;
+
+ /* Set arch independent function pointers, based on key size */
+ internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
+ internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
+ internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
+ internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
+ internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
+ internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
+ internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
+ internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+
+ internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
+ internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
+ internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
+ internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
+ internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
+ internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
+ internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
+ internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+
+ internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
+ internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
+ internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
+ internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
+ internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
+ internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
+ internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
+ internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
#endif
return 0;
+
+error_exit:
+ if (mb_mgr)
+ free_mb_mgr(mb_mgr);
+
+ rte_cryptodev_pmd_destroy(dev);
+
+ return -1;
}
static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
struct rte_cryptodev *cryptodev;
+ struct aesni_gcm_private *internals;
const char *name;
name = rte_vdev_device_name(vdev);
if (cryptodev == NULL)
return -ENODEV;
+ internals = cryptodev->data->dev_private;
+
+ free_mb_mgr(internals->mb_mgr);
+
return rte_cryptodev_pmd_destroy(cryptodev);
}
if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
goto qp_setup_cleanup;
- qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+ qp->ops = (const struct aesni_gcm_ops *)internals->ops;
qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
qp_conf->nb_descriptors, socket_id);
"Couldn't get object from session mempool");
return -ENOMEM;
}
- ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+ ret = aesni_gcm_set_session_parameters(internals->ops,
sess_private_data, xform);
if (ret != 0) {
AESNI_GCM_LOG(ERR, "failed configure session parameters");
/**< Vector mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
+ MB_MGR *mb_mgr;
+ /**< Multi-buffer instance */
+ struct aesni_gcm_ops ops[GCM_KEY_NUM];
+ /**< Function pointer table of the gcm APIs */
};
struct aesni_gcm_qp {
const struct aesni_gcm_ops *ops;
- /**< Architecture dependent function pointer table of the gcm APIs */
+ /**< Function pointer table of the gcm APIs */
struct rte_ring *processed_pkts;
/**< Ring for placing processed packets */
struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */