/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_mbuf_offload.h>

#include "rte_aesni_mb_pmd_private.h"

/**
 * Global static parameter used to create a unique name for each AES-NI multi
 * buffer crypto device.
 */
static unsigned unique_name_id;

static inline int
create_unique_device_name(char *name, size_t size)
{
	int ret;

	if (name == NULL)
		return -EINVAL;

	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_MB_PMD,
			unique_name_id++);
	if (ret < 0)
		return ret;
	return 0;
}
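
/*
 * Function pointer types matching the one-block hash and AES key expansion
 * helpers exposed through the multi-buffer library's aesni_mb_ops table.
 */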
typedef void (*hash_one_block_t)(void *data, void *digest);
typedef void (*aes_keyexp_t)(void *key, void *enc_exp_keys, void *dec_exp_keys);

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash	Function pointer to calculate digest on ipad/opad
 * @param ipad			Inner pad output byte array
 * @param opad			Outer pad output byte array
 * @param hkey			Authentication key
 * @param hkey_len		Authentication key length
 * @param blocksize		Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
		uint8_t *ipad, uint8_t *opad,
		uint8_t *hkey, uint16_t hkey_len,
		uint16_t blocksize)
{
	unsigned i, length;

	uint8_t ipad_buf[blocksize] __rte_aligned(16);
	uint8_t opad_buf[blocksize] __rte_aligned(16);

	/* Set up inner and outer pads */
	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

	/* XOR hash key with inner and outer pads */
	length = hkey_len > blocksize ? blocksize : hkey_len;
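
	/*
	 * Note: keys longer than the block size are truncated here rather
	 * than hashed down as RFC 2104 prescribes, so HMAC keys supplied to
	 * this PMD should be no longer than the algorithm's block size.
	 */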

	for (i = 0; i < length; i++) {
		ipad_buf[i] ^= hkey[i];
		opad_buf[i] ^= hkey[i];
	}

	/* Compute partial hashes */
	(*one_block_hash)(ipad_buf, ipad);
	(*one_block_hash)(opad_buf, opad);
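
	/*
	 * The ipad/opad partial digests above are saved in the session so the
	 * per-packet HMAC path can resume from this hash state instead of
	 * re-hashing the padded key for every operation.
	 */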

	/* Clean up stack */
	memset(ipad_buf, 0, blocksize);
	memset(opad_buf, 0, blocksize);
}

/** Get xform chain order */
static int
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	/*
	 * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
	 * operations; all other options are invalid, so we must have exactly
	 * two xform structs chained together.
	 */
	if (xform->next == NULL || xform->next->next != NULL)
		return -1;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return HASH_CIPHER;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return CIPHER_HASH;

	return -1;
}

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	hash_one_block_t hash_oneblock_fn;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
		MB_LOG_ERR("Crypto xform struct not of type auth");
		return -1;
	}

	/* Set Authentication Parameters */
	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
		sess->auth.algo = AES_XCBC;
		(*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
				sess->auth.xcbc.k1_expanded,
				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
		return 0;
	}
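
	/*
	 * Everything below is an HMAC variant; AES-XCBC-MAC above is the one
	 * non-HMAC case and stores expanded keys instead of ipad/opad
	 * precomputes.
	 */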
	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		sess->auth.algo = MD5;
		hash_oneblock_fn = mb_ops->aux.one_block.md5;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		sess->auth.algo = SHA1;
		hash_oneblock_fn = mb_ops->aux.one_block.sha1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		sess->auth.algo = SHA_224;
		hash_oneblock_fn = mb_ops->aux.one_block.sha224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sess->auth.algo = SHA_256;
		hash_oneblock_fn = mb_ops->aux.one_block.sha256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		sess->auth.algo = SHA_384;
		hash_oneblock_fn = mb_ops->aux.one_block.sha384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		sess->auth.algo = SHA_512;
		hash_oneblock_fn = mb_ops->aux.one_block.sha512;
		break;
	default:
		MB_LOG_ERR("Unsupported authentication algorithm selection");
		return -1;
	}

	/* Calculate Authentication precomputes */
	calculate_auth_precomputes(hash_oneblock_fn,
			sess->auth.pads.inner, sess->auth.pads.outer,
			xform->auth.key.data,
			xform->auth.key.length,
			get_auth_algo_blocksize(sess->auth.algo));

	return 0;
}

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	aes_keyexp_t aes_keyexp_fn;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		MB_LOG_ERR("Crypto xform struct not of type cipher");
		return -1;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = DECRYPT;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher operation parameter");
		return -1;
	}

	/* Select cipher mode */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = CBC;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher mode parameter");
		return -1;
	}

	/* Check key length and choose key expansion function */
	switch (xform->cipher.key.length) {
	case AES_128_BYTES:
		sess->cipher.key_length_in_bytes = AES_128_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
		break;
	case AES_192_BYTES:
		sess->cipher.key_length_in_bytes = AES_192_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
		break;
	case AES_256_BYTES:
		sess->cipher.key_length_in_bytes = AES_256_BYTES;
		aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
		break;
	default:
		MB_LOG_ERR("Unsupported cipher key length");
		return -1;
	}
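
	/*
	 * Expanding both the encrypt and decrypt schedules once here keeps
	 * key expansion off the data path; jobs simply reference the stored
	 * schedules.
	 */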
	/* Expand cipher keys */
	(*aes_keyexp_fn)(xform->cipher.key.data,
			sess->cipher.expanded_aes_keys.encode,
			sess->cipher.expanded_aes_keys.decode);

	return 0;
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	switch (aesni_mb_get_chain_order(xform)) {
	case HASH_CIPHER:
		sess->chain_order = HASH_CIPHER;
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case CIPHER_HASH:
		sess->chain_order = CIPHER_HASH;
		auth_xform = xform->next;
		cipher_xform = xform;
		break;
	default:
		MB_LOG_ERR("Unsupported operation chain order parameter");
		return -1;
	}

	if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
		MB_LOG_ERR("Invalid/unsupported authentication parameters");
		return -1;
	}

	if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
			cipher_xform)) {
		MB_LOG_ERR("Invalid/unsupported cipher parameters");
		return -1;
	}

	return 0;
}

/** Get multi buffer session */
static struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_mb_session *sess = NULL;

	if (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
		if (unlikely(op->sym->session->type !=
				RTE_CRYPTODEV_AESNI_MB_PMD))
			return NULL;

		sess = (struct aesni_mb_session *)op->sym->session->_private;
	} else {
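		/*
		 * Session-less operation: borrow a session object from the
		 * queue pair's mempool and initialise it from the op's xform
		 * chain; it is returned to the pool when the op completes.
		 */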
		void *_sess = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		sess = (struct aesni_mb_session *)
			((struct rte_cryptodev_sym_session *)_sess)->_private;

		if (unlikely(aesni_mb_set_session_parameters(qp->ops,
				sess, op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			sess = NULL;
		}
	}

	return sess;
}

/**
 * Process a crypto operation and complete a JOB_AES_HMAC job structure for
 * submission to the multi buffer library for processing.
 *
 * @param	qp	queue pair
 * @param	op	crypto operation to process
 * @param	session	session associated with the crypto operation
 *
 * @return
 * - Completed JOB_AES_HMAC structure pointer on success
 * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
 */
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
		struct aesni_mb_session *session)
{
	JOB_AES_HMAC *job;

	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	uint16_t m_offset = 0;

	job = (*qp->ops->job.get_next)(&qp->mb_mgr);
	if (unlikely(job == NULL))
		return NULL;

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	if (job->hash_alg == AES_XCBC) {
		job->_k1_expanded = session->auth.xcbc.k1_expanded;
		job->_k2 = session->auth.xcbc.k2;
		job->_k3 = session->auth.xcbc.k3;
	} else {
		job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
		job->hashed_auth_key_xor_opad = session->auth.pads.outer;
	}

	/* Mutable crypto operation parameters */
	if (op->sym->m_dst) {
		m_src = m_dst = op->sym->m_dst;

		/* append space for output data to mbuf */
		char *odata = rte_pktmbuf_append(m_dst,
				rte_pktmbuf_data_len(op->sym->m_src));
		if (odata == NULL) {
			MB_LOG_ERR("failed to allocate space in destination "
					"mbuf for source data");
			return NULL;
		}

		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void *),
				rte_pktmbuf_data_len(op->sym->m_src));
	} else {
		m_dst = m_src;
		m_offset = op->sym->cipher.data.offset;
	}

	/* Set digest output location */
	if (job->cipher_direction == DECRYPT) {
		job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
				get_digest_byte_length(job->hash_alg));

		if (job->auth_tag_output == NULL) {
			MB_LOG_ERR("failed to allocate space in output mbuf "
					"for temp digest");
			return NULL;
		}

		memset(job->auth_tag_output, 0,
				get_digest_byte_length(job->hash_alg));
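
		/*
		 * The digest is computed into this scratch area appended to
		 * the mbuf; post-processing compares it against the digest
		 * supplied in the op and then trims it off again.
		 */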
	} else {
		job->auth_tag_output = op->sym->auth.digest.data;
	}

	/*
	 * Multi-buffer library currently only supports returning a truncated
	 * digest length as specified in the relevant IPsec RFCs
	 */
	job->auth_tag_output_len_in_bytes =
			get_truncated_digest_byte_length(job->hash_alg);

	/* Set IV parameters */
	job->iv = op->sym->cipher.iv.data;
	job->iv_len_in_bytes = op->sym->cipher.iv.length;

	/* Set data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

	job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

	/* Set user data to be crypto operation data struct */
	job->user_data = op;
	job->user_data2 = m_dst;

	return job;
}

/**
 * Process a completed job and return the crypto operation it carried
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job to process
 *
 * @return
 * - Returns the processed crypto operation; for HASH_CIPHER operations the
 *   mbuf is trimmed of the scratch digest used to verify the supplied digest
 * - Returns NULL on invalid job
 */
static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op =
			(struct rte_crypto_op *)job->user_data;
	struct rte_mbuf *m_dst =
			(struct rte_mbuf *)job->user_data2;

	if (op == NULL || m_dst == NULL)
		return NULL;

	/* set status as successful by default */
	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* check if job has been processed */
	if (unlikely(job->status != STS_COMPLETED)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return op;
	} else if (job->chain_order == HASH_CIPHER) {
		/* Verify digest if required */
		if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
				job->auth_tag_output_len_in_bytes) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

		/* trim scratch area used for digest from mbuf */
		rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
	}

	/* Free session if a session-less crypto op */
	if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	return op;
}

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp	Queue Pair to process
 * @param job	JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
	struct rte_crypto_op *op = NULL;
	unsigned processed_jobs = 0;

	while (job) {
		processed_jobs++;

		op = post_process_mb_job(qp, job);
		if (op)
			rte_ring_enqueue(qp->processed_ops, (void *)op);
		else
			qp->stats.dequeue_err_count++;

		job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
	}

	return processed_jobs;
}
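
/*
 * Enqueue path overview: each op is resolved to a session, encoded as a
 * JOB_AES_HMAC and submitted to the multi-buffer manager; any jobs the
 * manager completes during submission are post-processed immediately, and
 * the queue is flushed if nothing completed, to stop the output stalling.
 */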
static uint16_t
aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_session *sess;
	struct aesni_mb_qp *qp = queue_pair;

	JOB_AES_HMAC *job = NULL;

	int i, processed_jobs = 0;

	for (i = 0; i < nb_ops; i++) {
#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
		if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
			MB_LOG_ERR("PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", ops[i]);
			qp->stats.enqueue_err_count++;
			goto flush_jobs;
		}
#endif
		sess = get_session(qp, ops[i]);
		if (unlikely(sess == NULL)) {
			qp->stats.enqueue_err_count++;
			goto flush_jobs;
		}

		job = process_crypto_op(qp, ops[i], sess);
		if (unlikely(job == NULL)) {
			qp->stats.enqueue_err_count++;
			goto flush_jobs;
		}

		/* Submit job for processing */
		job = (*qp->ops->job.submit)(&qp->mb_mgr);

		/*
		 * If submit returns a processed job then handle it,
		 * before submitting subsequent jobs
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, job);
	}

	if (processed_jobs == 0)
		goto flush_jobs;

	qp->stats.enqueued_count += processed_jobs;
	return i;

flush_jobs:
	/*
	 * If we haven't processed any jobs in the submit loop, then flush the
	 * jobs queue to stop the output stalling
	 */
	job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
	if (job)
		qp->stats.enqueued_count += handle_completed_jobs(qp, job);

	return i;
}
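
/*
 * All multi-buffer processing happens on the enqueue path above; dequeue
 * below simply drains completed operations from the queue pair's
 * processed_ops ring.
 */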
static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = queue_pair;

	unsigned nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
			(void **)ops, nb_ops);
	qp->stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

static int cryptodev_aesni_mb_uninit(const char *name);

static int
cryptodev_aesni_mb_create(const char *name, unsigned socket_id)
{
	struct rte_cryptodev *dev;
	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct aesni_mb_private *internals;
	enum aesni_mb_vector_mode vector_mode;

	/* Check CPU for support for AES instruction set */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
		MB_LOG_ERR("AES instructions not supported by CPU");
		return -EFAULT;
	}

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		vector_mode = RTE_AESNI_MB_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		vector_mode = RTE_AESNI_MB_AVX;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
		vector_mode = RTE_AESNI_MB_SSE;
	else {
		MB_LOG_ERR("Vector instructions are not supported by CPU");
		return -EFAULT;
	}

	/* create a unique device name */
	if (create_unique_device_name(crypto_dev_name,
			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
		MB_LOG_ERR("failed to create unique cryptodev name");
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
			sizeof(struct aesni_mb_private), socket_id);
	if (dev == NULL) {
		MB_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
	dev->dev_ops = rte_aesni_mb_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

	/* Set vector instructions mode supported */
	internals = dev->data->dev_private;

	internals->vector_mode = vector_mode;
	internals->max_nb_queue_pairs = RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS;
	internals->max_nb_sessions = RTE_AESNI_MB_PMD_MAX_NB_SESSIONS;

	return 0;

init_error:
	MB_LOG_ERR("driver %s: cryptodev_aesni_create failed", name);

	cryptodev_aesni_mb_uninit(crypto_dev_name);
	return -EFAULT;
}

static int
cryptodev_aesni_mb_init(const char *name,
		const char *params __rte_unused)
{
	RTE_LOG(INFO, PMD, "Initialising %s\n", name);

	return cryptodev_aesni_mb_create(name, rte_socket_id());
}

static int
cryptodev_aesni_mb_uninit(const char *name)
{
	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}

static struct rte_driver cryptodev_aesni_mb_pmd_drv = {
	.name = CRYPTODEV_NAME_AESNI_MB_PMD,
	.type = PMD_VDEV,
	.init = cryptodev_aesni_mb_init,
	.uninit = cryptodev_aesni_mb_uninit
};

PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv);