/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>

#include "aesni_gcm_pmd_private.h"
/** GCM encode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_enc[] = {
		[AESNI_GCM_KEY_128] = {
				aesni_gcm128_init,
				aesni_gcm128_enc_update,
				aesni_gcm128_enc_finalize
		},
		[AESNI_GCM_KEY_256] = {
				aesni_gcm256_init,
				aesni_gcm256_enc_update,
				aesni_gcm256_enc_finalize
		}
};

/** GCM decode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_dec[] = {
		[AESNI_GCM_KEY_128] = {
				aesni_gcm128_init,
				aesni_gcm128_dec_update,
				aesni_gcm128_dec_finalize
		},
		[AESNI_GCM_KEY_256] = {
				aesni_gcm256_init,
				aesni_gcm256_dec_update,
				aesni_gcm256_dec_finalize
		}
};
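
/*
 * Each table entry bundles the init/update/finalize entry points for one
 * key size. Session setup stores the key-size enum in session->key, so the
 * data path can dispatch without branching on key length, e.g.
 * aesni_gcm_enc[session->key].update(...).
 */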
/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform;
	const struct rte_crypto_sym_xform *cipher_xform;

	if (xform->next == NULL || xform->next->next != NULL) {
		GCM_LOG_ERR("Two and only two chained xform required");
		return -EINVAL;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = xform->next;
		cipher_xform = xform;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		auth_xform = xform;
		cipher_xform = xform->next;
	} else {
		GCM_LOG_ERR("Cipher and auth xform required");
		return -EINVAL;
	}

	if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
			(auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
				auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
		GCM_LOG_ERR("Only AES GCM and AES GMAC are supported");
		return -EINVAL;
	}

	/* Set IV parameters */
	sess->iv.offset = cipher_xform->cipher.iv.offset;
	sess->iv.length = cipher_xform->cipher.iv.length;

	/* IV check */
	if (sess->iv.length != 16 && sess->iv.length != 12 &&
			sess->iv.length != 0) {
		GCM_LOG_ERR("Wrong IV length");
		return -EINVAL;
	}

	/* Select Crypto operation */
	if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
	else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
	else {
		GCM_LOG_ERR("Only Encrypt/Generate and Decrypt/Verify "
				"operation pairs are valid");
		return -EINVAL;
	}

	/* Check key length and calculate GCM pre-compute. */
	switch (cipher_xform->cipher.key.length) {
	case 16:
		aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
		sess->key = AESNI_GCM_KEY_128;
		break;
	case 32:
		aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
		sess->key = AESNI_GCM_KEY_256;
		break;
	default:
		GCM_LOG_ERR("Unsupported cipher key length");
		return -EINVAL;
	}

	sess->aad_length = auth_xform->auth.add_auth_data_length;

	return 0;
}
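
/*
 * For illustration, a minimal sketch of the two-element xform chain an
 * application would hand to this function (assumptions: key_data, aad_len
 * and IV_OFFSET are hypothetical caller-side names; field values are
 * application-specific, not mandated by this driver):
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_AES_GCM,
 *			.digest_length = 16,
 *			.add_auth_data_length = aad_len,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.next = &auth,
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
 *			.key = { .data = key_data, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *		},
 *	};
 */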
/** Get gcm session, or build a temporary one from the op's xform chain
 * for session-less ops; returns NULL on failure */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_gcm_session *sess = NULL;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(sym_op->session->dev_type
				!= RTE_CRYPTODEV_AESNI_GCM_PMD))
			return sess;

		sess = (struct aesni_gcm_session *)sym_op->session->_private;
	} else {
		void *_sess;

		if (rte_mempool_get(qp->sess_mp, &_sess))
			return sess;

		sess = (struct aesni_gcm_session *)
			((struct rte_cryptodev_sym_session *)_sess)->_private;

		if (unlikely(aesni_gcm_set_session_parameters(sess,
				sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			sess = NULL;
		}
	}
	return sess;
}

/**
 * Process a crypto operation, running the GCM init/update/finalize
 * primitives over the (possibly segmented) source mbuf.
 *
 * @param	op	symmetric crypto operation
 * @param	session	GCM session
 *
 * @return
 * - 0 on success
 * - negative value otherwise
 */
static int
process_gcm_crypto_op(struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_mbuf *m_src = sym_op->m_src;
	uint32_t offset = sym_op->cipher.data.offset;
	uint32_t part_len, total_len, data_len;

	RTE_ASSERT(m_src != NULL);

	/* Skip fully-consumed leading segments to find the data start */
	while (offset >= m_src->data_len) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	data_len = m_src->data_len - offset;
	part_len = (data_len < sym_op->cipher.data.length) ? data_len :
			sym_op->cipher.data.length;

	/* Destination buffer is required when segmented source buffer */
	RTE_ASSERT((part_len == sym_op->cipher.data.length) ||
			((part_len != sym_op->cipher.data.length) &&
					(sym_op->m_dst != NULL)));
	/* Segmented destination buffer is not supported */
	RTE_ASSERT((sym_op->m_dst == NULL) ||
			((sym_op->m_dst != NULL) &&
					rte_pktmbuf_is_contiguous(sym_op->m_dst)));

	dst = sym_op->m_dst ?
			rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
					sym_op->cipher.data.offset) :
			rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
					sym_op->cipher.data.offset);

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	/*
	 * GCM working in 12B IV mode => 16B pre-counter block; the BE LSB
	 * of the block must be set to 1, and the driver expects that all
	 * 16B after the IV have been allocated.
	 */
	if (session->iv.length == 12) {
		uint32_t *iv_padd = (uint32_t *)&iv_ptr[12];
		*iv_padd = rte_bswap32(1);
	}

	if (sym_op->auth.digest.length != 16 &&
			sym_op->auth.digest.length != 12 &&
			sym_op->auth.digest.length != 8) {
		GCM_LOG_ERR("Unsupported digest length (8, 12 or 16 bytes)");
		return -1;
	}
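
	/*
	 * Both branches below follow the same pattern: init with the IV and
	 * AAD, feed the payload one contiguous mbuf segment at a time
	 * through update(), then produce (encrypt) or recompute (decrypt)
	 * the authentication tag with finalize(). GCM can be computed
	 * incrementally, so splitting the update calls along segment
	 * boundaries yields the same result as one contiguous pass.
	 */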
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {

		aesni_gcm_enc[session->key].init(&session->gdata,
				iv_ptr,
				sym_op->auth.aad.data,
				(uint64_t)session->aad_length);

		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
				(uint64_t)part_len);
		total_len = sym_op->cipher.data.length - part_len;

		while (total_len) {
			dst += part_len;
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			aesni_gcm_enc[session->key].update(&session->gdata,
					dst, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		aesni_gcm_enc[session->key].finalize(&session->gdata,
				sym_op->auth.digest.data,
				(uint64_t)sym_op->auth.digest.length);
	} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src,
				sym_op->auth.digest.length);

		if (!auth_tag) {
			GCM_LOG_ERR("Failed to append room for auth tag");
			return -1;
		}

		aesni_gcm_dec[session->key].init(&session->gdata,
				iv_ptr,
				sym_op->auth.aad.data,
				(uint64_t)session->aad_length);

		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
				(uint64_t)part_len);
		total_len = sym_op->cipher.data.length - part_len;

		while (total_len) {
			dst += part_len;
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			aesni_gcm_dec[session->key].update(&session->gdata,
					dst, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		/* Write the computed tag after the payload for comparison */
		aesni_gcm_dec[session->key].finalize(&session->gdata,
				auth_tag,
				(uint64_t)sym_op->auth.digest.length);
	}

	return 0;
}

/**
 * Post-process a completed GCM crypto operation: set the op status and,
 * for decryption, verify the supplied digest against the tag computed
 * into the area appended to the mbuf, then trim that area.
 *
 * @param op	symmetric crypto operation that has been processed
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	struct aesni_gcm_session *session =
		(struct aesni_gcm_session *)op->sym->session->_private;

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Verify digest if required */
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {

		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
				m->data_len - op->sym->auth.digest.length);

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
		rte_hexdump(stdout, "auth tag (orig):",
				op->sym->auth.digest.data,
				op->sym->auth.digest.length);
		rte_hexdump(stdout, "auth tag (calc):",
				tag, op->sym->auth.digest.length);
#endif

		if (memcmp(tag, op->sym->auth.digest.data,
				op->sym->auth.digest.length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

		/* trim area used for digest from mbuf */
		rte_pktmbuf_trim(m, op->sym->auth.digest.length);
	}
}

/**
 * Complete a GCM request: post-process the operation and, for a
 * session-less op, release its temporary session back to the mempool.
 *
 * @param qp	queue pair the operation was processed on
 * @param op	symmetric crypto operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
		struct rte_crypto_op *op)
{
	post_process_gcm_crypto_op(op);

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}
}
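
/*
 * Note on the data path: this PMD does its work lazily. enqueue_burst()
 * below only places the raw operations on the queue pair's ring; the
 * actual GCM processing happens in dequeue_burst(), on the lcore that
 * polls for completions.
 */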
static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_session *sess;
	struct aesni_gcm_qp *qp = queue_pair;

	int retval = 0;
	unsigned int i, nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {

		sess = aesni_gcm_get_session(qp, ops[i]);
		if (unlikely(sess == NULL)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		retval = process_gcm_crypto_op(ops[i], sess);
		if (retval < 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		handle_completed_gcm_crypto_op(qp, ops[i]);
	}

	qp->qp_stats.dequeued_count += i;

	return i;
}

static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_qp *qp = queue_pair;

	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}
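
/*
 * A minimal application-side usage sketch (illustrative only; dev_id,
 * qp_id and the ops/deq_ops arrays are assumed to have been set up via
 * the standard rte_cryptodev API beforehand):
 *
 *	uint16_t n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *			ops, nb_ops);
 *	uint16_t n_deq;
 *	do {
 *		n_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *				deq_ops, n_enq);
 *	} while (n_deq == 0);
 */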
static int aesni_gcm_remove(struct rte_vdev_device *vdev);

static int
aesni_gcm_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_gcm_private *internals;

	if (init_params->name[0] == '\0')
		snprintf(init_params->name, sizeof(init_params->name),
				"%s", name);

	/* Check CPU for support for AES instruction set */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
		GCM_LOG_ERR("AES instructions not supported by CPU");
		return -EFAULT;
	}

	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
			sizeof(struct aesni_gcm_private),
			init_params->socket_id, vdev);
	if (dev == NULL) {
		GCM_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
	dev->dev_ops = rte_aesni_gcm_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = dev->data->dev_private;

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;

init_error:
	GCM_LOG_ERR("driver %s: create failed", init_params->name);

	aesni_gcm_remove(vdev);
	return -EFAULT;
}

static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
	struct rte_crypto_vdev_init_params init_params = {
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
		rte_socket_id(),
		{0}
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	if (init_params.name[0] != '\0')
		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
				init_params.name);
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return aesni_gcm_create(name, vdev, &init_params);
}

static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}

static struct rte_vdev_driver aesni_gcm_pmd_drv = {
	.probe = aesni_gcm_probe,
	.remove = aesni_gcm_remove
};

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");
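
/*
 * Example instantiation from the EAL command line (illustrative; the
 * device name macro is assumed to expand to "crypto_aesni_gcm", and
 * "./app" stands for any DPDK application):
 *
 *	./app --vdev "crypto_aesni_gcm,max_nb_queue_pairs=2,socket_id=0"
 */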