/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>

#include "aesni_gcm_pmd_private.h"
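
/*
 * Each aesni_gcm_ops entry below bundles the init/update/finalize entry
 * points of the underlying AES-NI GCM implementation for one key size;
 * sessions select an entry through sess->key.
 */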
/** GCM encode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_enc[] = {
		[AESNI_GCM_KEY_128] = {
				aesni_gcm128_init,
				aesni_gcm128_enc_update,
				aesni_gcm128_enc_finalize },
		[AESNI_GCM_KEY_256] = {
				aesni_gcm256_init,
				aesni_gcm256_enc_update,
				aesni_gcm256_enc_finalize }
};
/** GCM decode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_dec[] = {
		[AESNI_GCM_KEY_128] = {
				aesni_gcm128_init,
				aesni_gcm128_dec_update,
				aesni_gcm128_dec_finalize },
		[AESNI_GCM_KEY_256] = {
				aesni_gcm256_init,
				aesni_gcm256_dec_update,
				aesni_gcm256_dec_finalize }
};
/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform;
	const struct rte_crypto_sym_xform *cipher_xform;
	uint16_t digest_length;
	uint8_t key_length;
	uint8_t *key;
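
	/*
	 * A single xform means authentication only (AES-GMAC); a chained
	 * xform pair means AEAD-style AES-GCM (cipher + auth).
	 */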
	if (xform->next == NULL) {
		auth_xform = xform;
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
			GCM_LOG_ERR("Only AES GMAC is supported as an "
					"authentication only algorithm");
			return -EINVAL;
		}
		/* Set IV parameters */
		sess->iv.offset = auth_xform->auth.iv.offset;
		sess->iv.length = auth_xform->auth.iv.length;

		/* Select Crypto operation */
		if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
			sess->op = AESNI_GMAC_OP_GENERATE;
		else
			sess->op = AESNI_GMAC_OP_VERIFY;

		key_length = auth_xform->auth.key.length;
		key = auth_xform->auth.key.data;
	} else {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
				xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			auth_xform = xform->next;
			cipher_xform = xform;
		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
				xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			auth_xform = xform;
			cipher_xform = xform->next;
		} else {
			GCM_LOG_ERR("Cipher and auth xform required "
					"when using AES GCM");
			return -EINVAL;
		}

		if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
				auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM)) {
			GCM_LOG_ERR("The only combined operation "
					"supported is AES GCM");
			return -EINVAL;
		}
		/* Set IV parameters */
		sess->iv.offset = cipher_xform->cipher.iv.offset;
		sess->iv.length = cipher_xform->cipher.iv.length;

		/* Select Crypto operation */
		if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
				auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
		else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
				auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
		else {
			GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
					" Decrypt/Verify are valid only");
			return -EINVAL;
		}

		key_length = cipher_xform->cipher.key.length;
		key = cipher_xform->cipher.key.data;

		sess->aad_length = auth_xform->auth.add_auth_data_length;
	}
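
	/*
	 * Only a 12B IV (the standard GCM nonce, padded below into a 16B
	 * pre-counter block) or a full 16B IV is supported; a length of
	 * zero is also let through unmodified.
	 */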
	if (sess->iv.length != 16 && sess->iv.length != 12 &&
			sess->iv.length != 0) {
		GCM_LOG_ERR("Wrong IV length");
		return -EINVAL;
	}

	digest_length = auth_xform->auth.digest_length;

	/* Check key length and calculate GCM pre-compute. */
	switch (key_length) {
	case 16:
		aesni_gcm128_pre(key, &sess->gdata);
		sess->key = AESNI_GCM_KEY_128;
		break;
	case 32:
		aesni_gcm256_pre(key, &sess->gdata);
		sess->key = AESNI_GCM_KEY_256;
		break;
	default:
		GCM_LOG_ERR("Unsupported cipher/auth key length");
		return -EINVAL;
	}

	if (digest_length != 16 &&
			digest_length != 12 &&
			digest_length != 8) {
		GCM_LOG_ERR("Unsupported digest length");
		return -EINVAL;
	}
	sess->digest_length = digest_length;

	return 0;
}
/** Get gcm session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_gcm_session *sess = NULL;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(sym_op->session->dev_type
				!= RTE_CRYPTODEV_AESNI_GCM_PMD))
			return NULL;

		sess = (struct aesni_gcm_session *)sym_op->session->_private;
	} else {
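		/*
		 * Session-less op: take a session object from the queue
		 * pair's mempool and initialise it from the op's xform chain.
		 */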
		void *_sess;

		if (rte_mempool_get(qp->sess_mp, &_sess))
			return NULL;
		sess = (struct aesni_gcm_session *)
			((struct rte_cryptodev_sym_session *)_sess)->_private;
		if (unlikely(aesni_gcm_set_session_parameters(sess,
				sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			sess = NULL;
		}
	}

	return sess;
}
/**
 * Process a crypto operation, running the GCM operation described by the
 * session directly on the op's source (and optional destination) mbufs.
 *
 * @param	op	symmetric crypto operation
 * @param	session	GCM session
 *
 * @return
 * - 0 on success
 * - negative value otherwise
 */
static int
process_gcm_crypto_op(struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_mbuf *m_src = sym_op->m_src;
	uint32_t offset, data_offset, data_length;
	uint32_t part_len, total_len, data_len;
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
		offset = sym_op->cipher.data.offset;
		data_offset = offset;
		data_length = sym_op->cipher.data.length;
	} else {
		offset = sym_op->auth.data.offset;
		data_offset = offset;
		data_length = sym_op->auth.data.length;
	}

	RTE_ASSERT(m_src != NULL);
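
	/*
	 * Walk the mbuf segment chain to the segment that holds the start
	 * of the data, reducing the offset to an intra-segment offset.
	 */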
	while (offset >= m_src->data_len) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	data_len = m_src->data_len - offset;
	part_len = (data_len < data_length) ? data_len :
			data_length;
	/* Destination buffer is required when the source buffer is segmented */
	RTE_ASSERT((part_len == data_length) ||
			((part_len != data_length) &&
					(sym_op->m_dst != NULL)));
	/* Segmented destination buffer is not supported */
	RTE_ASSERT((sym_op->m_dst == NULL) ||
			((sym_op->m_dst != NULL) &&
					rte_pktmbuf_is_contiguous(sym_op->m_dst)));
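
	/*
	 * dst always points into the contiguous output mbuf at the data
	 * offset; src points into the current source segment.
	 */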
	dst = sym_op->m_dst ?
			rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
					data_offset) :
			rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
					data_offset);

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	/*
	 * When GCM works in 12B IV mode it needs a 16B pre-counter block
	 * with its big-endian LSB set to 1; the driver expects the full
	 * 16B to be allocated.
	 */
	if (session->iv.length == 12) {
		uint32_t *iv_padd = (uint32_t *)&iv_ptr[12];
		*iv_padd = rte_bswap32(1);
	}
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {

		aesni_gcm_enc[session->key].init(&session->gdata,
				iv_ptr,
				sym_op->auth.aad.data,
				(uint64_t)session->aad_length);

		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
				(uint64_t)part_len);
		total_len = data_length - part_len;
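
		/*
		 * Remaining data lives in the following source segments;
		 * feed each one to update() while writing to the contiguous
		 * destination.
		 */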
		while (total_len) {
			dst += part_len;
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);
			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;
			aesni_gcm_enc[session->key].update(&session->gdata,
					dst, src, (uint64_t)part_len);
			total_len -= part_len;
		}

		aesni_gcm_enc[session->key].finalize(&session->gdata,
				sym_op->auth.digest.data,
				(uint64_t)session->digest_length);
	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
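		/*
		 * Compute the tag into scratch space appended to the mbuf;
		 * post-processing compares it with the supplied digest and
		 * trims it off again.
		 */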
		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src,
				session->digest_length);

		if (auth_tag == NULL) {
			GCM_LOG_ERR("Failed to append room for the auth tag");
			return -1;
		}

		aesni_gcm_dec[session->key].init(&session->gdata,
				iv_ptr,
				sym_op->auth.aad.data,
				(uint64_t)session->aad_length);

		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
				(uint64_t)part_len);
		total_len = data_length - part_len;

		while (total_len) {
			dst += part_len;
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);
			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;
			aesni_gcm_dec[session->key].update(&session->gdata,
					dst, src, (uint64_t)part_len);
			total_len -= part_len;
		}

		aesni_gcm_dec[session->key].finalize(&session->gdata,
				auth_tag,
				(uint64_t)session->digest_length);
	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
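		/*
		 * GMAC: the data is authenticated only, so it is passed to
		 * init() in place of the AAD and no cipher output is
		 * produced.
		 */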
		aesni_gcm_enc[session->key].init(&session->gdata,
				iv_ptr,
				src,
				(uint64_t)data_length);
		aesni_gcm_enc[session->key].finalize(&session->gdata,
				sym_op->auth.digest.data,
				(uint64_t)session->digest_length);
	} else { /* AESNI_GMAC_OP_VERIFY */
		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src,
				session->digest_length);

		if (auth_tag == NULL) {
			GCM_LOG_ERR("Failed to append room for the auth tag");
			return -1;
		}

		aesni_gcm_dec[session->key].init(&session->gdata,
				iv_ptr,
				src,
				(uint64_t)data_length);

		aesni_gcm_dec[session->key].finalize(&session->gdata,
				auth_tag,
				(uint64_t)session->digest_length);
	}

	return 0;
}
/**
 * Post-process a completed GCM operation: set the op status and, for
 * verify operations, check the computed digest against the one supplied
 * in the op and trim the scratch tag off the mbuf.
 *
 * @param op	symmetric crypto operation
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	struct aesni_gcm_session *session =
		(struct aesni_gcm_session *)op->sym->session->_private;
	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
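
	/*
	 * For verify operations the freshly computed tag sits at the tail
	 * of the mbuf, where process_gcm_crypto_op() appended it.
	 */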
	/* Verify digest if required */
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
			session->op == AESNI_GMAC_OP_VERIFY) {

		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
				m->data_len - session->digest_length);

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
		rte_hexdump(stdout, "auth tag (orig):",
				op->sym->auth.digest.data, session->digest_length);
		rte_hexdump(stdout, "auth tag (calc):",
				tag, session->digest_length);
#endif

		if (memcmp(tag, op->sym->auth.digest.data,
				session->digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

		/* trim area used for digest from mbuf */
		rte_pktmbuf_trim(m, session->digest_length);
	}
}
/**
 * Process a completed GCM request and release the session if it was a
 * session-less operation
 *
 * @param qp	queue pair
 * @param op	symmetric crypto operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
		struct rte_crypto_op *op)
{
	post_process_gcm_crypto_op(op);

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}
}
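
/*
 * Note that this PMD defers all processing to dequeue time:
 * enqueue_burst() below only places ops on the queue pair's ring, and
 * dequeue_burst() pulls them off, resolves the session and runs the GCM
 * operation before handing them back.
 */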
static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_session *sess;
	struct aesni_gcm_qp *qp = queue_pair;
	int retval = 0;
	unsigned int i, nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		sess = aesni_gcm_get_session(qp, ops[i]);
		if (unlikely(sess == NULL)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		retval = process_gcm_crypto_op(ops[i], sess);
		if (retval < 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		handle_completed_gcm_crypto_op(qp, ops[i]);
	}

	qp->qp_stats.dequeued_count += i;

	return i;
}
static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_qp *qp = queue_pair;
	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}
static int aesni_gcm_remove(struct rte_vdev_device *vdev);

static int
aesni_gcm_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_gcm_private *internals;

	if (init_params->name[0] == '\0')
		snprintf(init_params->name, sizeof(init_params->name),
				"%s", name);

	/* Check CPU for support for AES instruction set */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
		GCM_LOG_ERR("AES instructions not supported by CPU");
		return -EFAULT;
	}

	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
			sizeof(struct aesni_gcm_private), init_params->socket_id,
			vdev);
	if (dev == NULL) {
		GCM_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
	dev->dev_ops = rte_aesni_gcm_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
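
	/*
	 * Scatter-gather is advertised for the source buffer only; the
	 * destination must be contiguous (enforced by the asserts in
	 * process_gcm_crypto_op()).
	 */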
	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = dev->data->dev_private;

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;

init_error:
	GCM_LOG_ERR("driver %s: create failed", init_params->name);

	aesni_gcm_remove(vdev);
	return -EFAULT;
}
static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
	struct rte_crypto_vdev_init_params init_params = {
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
		rte_socket_id(),
		{0}
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	if (init_params.name[0] != '\0')
		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
			init_params.name);
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return aesni_gcm_create(name, vdev, &init_params);
}
static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}
static struct rte_vdev_driver aesni_gcm_pmd_drv = {
	.probe = aesni_gcm_probe,
	.remove = aesni_gcm_remove
};

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");