/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <openssl/aes.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "aesni_gcm_pmd_private.h"
/**
 * Global static parameter used to create a unique name for each AES-NI multi
 * buffer crypto device.
 */
static unsigned unique_name_id;
static inline int
create_unique_device_name(char *name, size_t size)
{
	int ret;

	if (name == NULL)
		return -EINVAL;

	ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD),
			unique_name_id++);
	if (ret < 0)
		return ret;
	return 0;
}
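/*
 * Editor's note (illustrative, not driver code): successive calls yield
 * names of the form "<pmd_name>_0", "<pmd_name>_1", ..., truncated by
 * snprintf() to at most size - 1 characters:
 *
 *	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 *	create_unique_device_name(name, sizeof(name)); // e.g. "<pmd_name>_0"
 */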
static int
aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
		uint8_t *aeskey, unsigned aeskey_length)
{
	uint8_t key[aeskey_length] __rte_aligned(16);
	AES_KEY enc_key;

	/* Both lengths must be multiples of the AES block size (16B) */
	if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
		return -EFAULT;

	memcpy(key, aeskey, aeskey_length);

	if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
		return -EFAULT;

	AES_encrypt(hsubkey, hsubkey, &enc_key);

	return 0;
}
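/*
 * Editor's note (illustrative, not driver code): per the GCM specification
 * the GHASH hash subkey is H = AES_K(0^128). The caller passes in a
 * zero-initialised hsubkey buffer, so the in-place AES_encrypt() above
 * computes exactly that. A standalone equivalent using only the OpenSSL
 * calls already included here, with "key128" a hypothetical 16B AES key:
 *
 *	uint8_t h[16] = { 0 };			// the all-zero input block
 *	AES_KEY k;
 *	AES_set_encrypt_key(key128, 128, &k);
 *	AES_encrypt(h, h, &k);			// h now holds the subkey H
 */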
/** Get xform chain order */
static enum aesni_gcm_operation
aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
{
	/*
	 * GCM only supports authenticated encryption or authenticated
	 * decryption, all other options are invalid, so we must have exactly
	 * 2 xform structs chained together
	 */
	if (xform->next == NULL || xform->next->next != NULL)
		return AESNI_GCM_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
	}

	return AESNI_GCM_OP_NOT_SUPPORTED;
}
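/*
 * Illustrative sketch (hypothetical application-side setup, not driver
 * code): the only chains accepted above are CIPHER->AUTH (encrypt) and
 * AUTH->CIPHER (decrypt). For encryption, for example:
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = { .algo = RTE_CRYPTO_AUTH_AES_GCM,
 *			  .op = RTE_CRYPTO_AUTH_OP_GENERATE },
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_GCM,
 *			    .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT },
 *	};
 *	// aesni_gcm_get_mode(&cipher) == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION
 */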
/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
		struct aesni_gcm_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;

	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_gcm_get_mode(xform)) {
	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;

		cipher_xform = xform;
		auth_xform = xform->next;
		break;
	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;

		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	default:
		GCM_LOG_ERR("Unsupported operation chain order parameter");
		return -EINVAL;
	}

	/* We only support AES GCM, so both xforms must specify it */
	if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM ||
			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
		return -EINVAL;

	/* Select cipher direction */
	if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
			cipher_xform->cipher.op !=
					RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
				"(DECRYPT) specified are an invalid selection");
		return -EINVAL;
	} else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
			cipher_xform->cipher.op !=
					RTE_CRYPTO_CIPHER_OP_DECRYPT) {
		GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
				"(ENCRYPT) specified are an invalid selection");
		return -EINVAL;
	}

	/* Expand GCM AES128 key */
	(*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
			sess->gdata.expanded_keys);

	/* Calculate hash sub key here */
	aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
			cipher_xform->cipher.key.data,
			cipher_xform->cipher.key.length);

	/* Calculate GCM pre-compute */
	(*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);

	return 0;
}
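/*
 * Usage sketch (hypothetical application code, assuming the pre-17.08
 * cryptodev session API this file is written against): the parser above is
 * reached through session creation, e.g.
 *
 *	struct rte_cryptodev_sym_session *s =
 *		rte_cryptodev_sym_session_create(dev_id, &cipher);
 *
 * where "cipher" heads a CIPHER->AUTH xform chain as in the earlier sketch
 * and dev_id identifies an AESNI GCM device.
 */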
/** Get gcm session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
{
	struct aesni_gcm_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
		if (unlikely(op->session->dev_type
				!= RTE_CRYPTODEV_AESNI_GCM_PMD))
			return sess;

		sess = (struct aesni_gcm_session *)op->session->_private;
	} else {
		void *_sess;

		if (rte_mempool_get(qp->sess_mp, &_sess))
			return sess;

		sess = (struct aesni_gcm_session *)
			((struct rte_cryptodev_sym_session *)_sess)->_private;

		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
				sess, op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			sess = NULL;
		}
	}
	return sess;
}
/**
 * Process a crypto operation, calling the GCM library encryption or
 * decryption routine that matches the session's operation type
 *
 * @param	qp		queue pair
 * @param	op		symmetric crypto operation
 * @param	session		GCM session
 *
 * @return
 * - 0 on success
 * - -1 on failure
 */
static int
process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
		struct aesni_gcm_session *session)
{
	uint8_t *src, *dst;
	struct rte_mbuf *m = op->m_src;

	src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
	dst = op->m_dst ?
			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
					op->cipher.data.offset) :
			rte_pktmbuf_mtod_offset(m, uint8_t *,
					op->cipher.data.offset);
	/* sanity checks */
	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
			op->cipher.iv.length != 0) {
		GCM_LOG_ERR("iv");
		return -1;
	}

	/*
	 * GCM working in 12B IV mode => 16B pre-counter block we need
	 * to set BE LSB to 1, driver expects that 16B is allocated
	 */
	if (op->cipher.iv.length == 12) {
		op->cipher.iv.data[15] = 1;
	}
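	/*
	 * Editor's note: per NIST SP 800-38D, for a 96-bit IV the pre-counter
	 * block is J0 = IV || 0^31 || 1: bytes 0-11 hold the IV and bytes
	 * 12-15 a big-endian 32-bit counter initialised to 1, hence setting
	 * iv.data[15] = 1 above. E.g. for IV = 00 11 ... aa bb the 16B block
	 * becomes:
	 *
	 *	00 11 22 33 44 55 66 77 88 99 aa bb 00 00 00 01
	 */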
	if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
			op->auth.aad.length != 0) {
		GCM_LOG_ERR("aad");
		return -1;
	}

	if (op->auth.digest.length != 16 &&
			op->auth.digest.length != 12 &&
			op->auth.digest.length != 8 &&
			op->auth.digest.length != 0) {
		GCM_LOG_ERR("digest");
		return -1;
	}
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {

		(*qp->ops->gcm.enc)(&session->gdata, dst, src,
				(uint64_t)op->cipher.data.length,
				op->cipher.iv.data,
				op->auth.aad.data,
				(uint64_t)op->auth.aad.length,
				op->auth.digest.data,
				(uint64_t)op->auth.digest.length);
	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
				op->auth.digest.length);

		if (!auth_tag) {
			GCM_LOG_ERR("auth_tag");
			return -1;
		}

		(*qp->ops->gcm.dec)(&session->gdata, dst, src,
				(uint64_t)op->cipher.data.length,
				op->cipher.iv.data,
				op->auth.aad.data,
				(uint64_t)op->auth.aad.length,
				auth_tag,
				(uint64_t)op->auth.digest.length);
	} else {
		GCM_LOG_ERR("session op");
		return -1;
	}

	return 0;
}
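/*
 * Illustrative sketch (hypothetical values, not driver code): the per-op
 * fields this PMD expects, given the checks above. Note the driver requires
 * 16B allocated for a 12B IV so it can write the counter byte in place:
 *
 *	op->cipher.iv.data     = iv_ptr;   // 16B allocated, first 12B = IV
 *	op->cipher.iv.length   = 12;       // 0, 12 or 16 accepted
 *	op->auth.aad.length    = 12;       // 0, 8 or 12 accepted
 *	op->auth.digest.length = 16;       // 0, 8, 12 or 16 accepted
 */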
/**
 * Post-process a completed GCM crypto operation: set the operation status
 * and, for an authenticated decryption, verify the calculated digest
 * against the digest supplied with the operation
 *
 * @param op	crypto operation to post-process
 *
 * @return
 * - void; on digest mismatch the operation status is set to
 *   RTE_CRYPTO_OP_STATUS_AUTH_FAILED, and the mbuf area used for the
 *   calculated digest is trimmed afterwards
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	struct aesni_gcm_session *session =
		(struct aesni_gcm_session *)op->sym->session->_private;

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	/* Verify digest if required */
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {

		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
				m->data_len - op->sym->auth.digest.length);

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
		rte_hexdump(stdout, "auth tag (orig):",
				op->sym->auth.digest.data,
				op->sym->auth.digest.length);
		rte_hexdump(stdout, "auth tag (calc):",
				tag, op->sym->auth.digest.length);
#endif

		if (memcmp(tag, op->sym->auth.digest.data,
				op->sym->auth.digest.length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

		/* trim area used for digest from mbuf */
		rte_pktmbuf_trim(m, op->sym->auth.digest.length);
	}
}
/**
 * Complete a processed GCM crypto operation: run post-processing, release
 * the session of a session-less operation and enqueue the operation on the
 * queue pair's processed-operations ring
 *
 * @param qp	Queue Pair to process
 * @param op	Completed crypto operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
		struct rte_crypto_op *op)
{
	post_process_gcm_crypto_op(op);

	/* Free session if a session-less crypto op */
	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	rte_ring_enqueue(qp->processed_pkts, (void *)op);
}
static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_session *sess;
	struct aesni_gcm_qp *qp = queue_pair;

	int i, retval = 0;

	for (i = 0; i < nb_ops; i++) {

		sess = aesni_gcm_get_session(qp, ops[i]->sym);
		if (unlikely(sess == NULL)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.enqueue_err_count++;
			break;
		}

		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
		if (retval < 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.enqueue_err_count++;
			break;
		}

		handle_completed_gcm_crypto_op(qp, ops[i]);

		qp->qp_stats.enqueued_count++;
	}
	return i;
}
static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_qp *qp = queue_pair;

	unsigned nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
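/*
 * Usage sketch (application side, hypothetical ids): this PMD processes
 * operations synchronously at enqueue time, so a dequeue immediately after
 * an enqueue drains the completed ops from the qp's processed-ops ring:
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	uint16_t d = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, n);
 */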
static int aesni_gcm_remove(const char *name);
static int
aesni_gcm_create(const char *name,
		struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct aesni_gcm_private *internals;
	enum aesni_gcm_vector_mode vector_mode;

	/* Check CPU for support for AES instruction set */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
		GCM_LOG_ERR("AES instructions not supported by CPU");
		return -EFAULT;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_GCM_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_GCM_AVX;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
		vector_mode = RTE_AESNI_GCM_SSE;
	else {
		GCM_LOG_ERR("Vector instructions are not supported by CPU");
		return -EFAULT;
	}

	/* create a unique device name */
	if (create_unique_device_name(crypto_dev_name,
			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
		GCM_LOG_ERR("failed to create unique cryptodev name");
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
			sizeof(struct aesni_gcm_private), init_params->socket_id);
	if (dev == NULL) {
		GCM_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
	dev->dev_ops = rte_aesni_gcm_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI;

	switch (vector_mode) {
	case RTE_AESNI_GCM_SSE:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		break;
	case RTE_AESNI_GCM_AVX:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		break;
	case RTE_AESNI_GCM_AVX2:
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		break;
	default:
		break;
	}

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;

init_error:
	GCM_LOG_ERR("driver %s: create failed", name);

	aesni_gcm_remove(crypto_dev_name);
	return -EFAULT;
}
static int
aesni_gcm_probe(const char *name, const char *input_args)
{
	struct rte_crypto_vdev_init_params init_params = {
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
		rte_socket_id()
	};

	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return aesni_gcm_create(name, &init_params);
}
static int
aesni_gcm_remove(const char *name)
{
	if (name == NULL)
		return -EINVAL;

	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}
static struct rte_vdev_driver aesni_gcm_pmd_drv = {
	.probe = aesni_gcm_probe,
	.remove = aesni_gcm_remove
};

DRIVER_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");