/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>

#include "aesni_gcm_pmd_private.h"
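
/*
 * The GCM primitives come from the linked AES-NI GCM library as separate
 * init/update/finalize entry points per key size; the tables below bundle
 * them so the data path can index by the session's AESNI_GCM_KEY_* value.
 */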
/** GCM encode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_enc[] = {
	[AESNI_GCM_KEY_128] = {
		aesni_gcm128_init,
		aesni_gcm128_enc_update,
		aesni_gcm128_enc_finalize
	},
	[AESNI_GCM_KEY_256] = {
		aesni_gcm256_init,
		aesni_gcm256_enc_update,
		aesni_gcm256_enc_finalize
	}
};

/** GCM decode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_dec[] = {
	[AESNI_GCM_KEY_128] = {
		aesni_gcm128_init,
		aesni_gcm128_dec_update,
		aesni_gcm128_dec_finalize
	},
	[AESNI_GCM_KEY_256] = {
		aesni_gcm256_init,
		aesni_gcm256_dec_update,
		aesni_gcm256_dec_finalize
	}
};
/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform;
	const struct rte_crypto_sym_xform *cipher_xform;
	uint16_t digest_length;

	if (xform->next == NULL || xform->next->next != NULL) {
		GCM_LOG_ERR("Two and only two chained xform required");
		return -EINVAL;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = xform->next;
		cipher_xform = xform;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		auth_xform = xform;
		cipher_xform = xform->next;
	} else {
		GCM_LOG_ERR("Cipher and auth xform required");
		return -EINVAL;
	}

	if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
			(auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
			auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
		GCM_LOG_ERR("Only AES GCM and AES GMAC are supported");
		return -EINVAL;
	}

	/* Set IV parameters */
	sess->iv.offset = cipher_xform->cipher.iv.offset;
	sess->iv.length = cipher_xform->cipher.iv.length;

	/* IV check */
	if (sess->iv.length != 16 && sess->iv.length != 12 &&
			sess->iv.length != 0) {
		GCM_LOG_ERR("Wrong IV length");
		return -EINVAL;
	}
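
	/*
	 * A 12-byte IV is the standard GCM nonce and is expanded to the
	 * full 16-byte counter block per operation in process_gcm_crypto_op();
	 * a 16-byte IV is treated as a caller-built pre-counter block.
	 */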

	/* Select Crypto operation */
	if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
	else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
	else {
		GCM_LOG_ERR("Only Encrypt/Generate and Decrypt/Verify "
				"cipher/auth operation pairs are supported");
		return -EINVAL;
	}

	digest_length = auth_xform->auth.digest_length;

	/* Check key length and calculate GCM pre-compute. */
	switch (cipher_xform->cipher.key.length) {
	case 16:
		aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
		sess->key = AESNI_GCM_KEY_128;
		break;
	case 32:
		aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
		sess->key = AESNI_GCM_KEY_256;
		break;
	default:
		GCM_LOG_ERR("Unsupported cipher key length");
		return -EINVAL;
	}

	sess->aad_length = auth_xform->auth.add_auth_data_length;

	/* Digest check */
	if (digest_length != 16 && digest_length != 12 &&
			digest_length != 8) {
		GCM_LOG_ERR("Unsupported digest length (must be 8, 12 or 16)");
		return -EINVAL;
	}
	sess->digest_length = digest_length;

	return 0;
}
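
/*
 * For reference, a minimal xform chain that the parser above accepts
 * (illustrative only; `key` and `IV_OFFSET` are application-defined):
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_AES_GCM,
 *			.digest_length = 16,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *		},
 *	};
 */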

/** Get GCM session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
	struct aesni_gcm_session *sess = NULL;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (unlikely(sym_op->session->dev_type
				!= RTE_CRYPTODEV_AESNI_GCM_PMD))
			return NULL;
		sess = (struct aesni_gcm_session *)sym_op->session->_private;
	} else {
		/* Session-less: build a temporary session from the xforms */
		void *_sess;

		if (rte_mempool_get(qp->sess_mp, &_sess))
			return NULL;
		sess = (struct aesni_gcm_session *)
			((struct rte_cryptodev_sym_session *)_sess)->_private;

		if (unlikely(aesni_gcm_set_session_parameters(sess,
				sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			sess = NULL;
		}
	}
	return sess;
}
/**
 * Process a crypto operation: run the GCM init/update/finalize primitives
 * over the (possibly segmented) source mbuf.
 *
 * @param op		symmetric crypto operation
 * @param session	GCM session
 *
 * @return
 * - 0 on success, negative on error
 */
static int
process_gcm_crypto_op(struct rte_crypto_op *op,
		struct aesni_gcm_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_mbuf *m_src = sym_op->m_src;
	uint32_t offset = sym_op->cipher.data.offset;
	uint32_t part_len, total_len, data_len;

	RTE_ASSERT(m_src != NULL);

	/* Walk the mbuf chain to the segment containing the data offset */
	while (offset >= m_src->data_len) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	data_len = m_src->data_len - offset;
	part_len = (data_len < sym_op->cipher.data.length) ? data_len :
			sym_op->cipher.data.length;

	/* A destination buffer is required when the source is segmented */
	RTE_ASSERT((part_len == sym_op->cipher.data.length) ||
			((part_len != sym_op->cipher.data.length) &&
					(sym_op->m_dst != NULL)));
	/* Segmented destination buffers are not supported */
	RTE_ASSERT((sym_op->m_dst == NULL) ||
			((sym_op->m_dst != NULL) &&
					rte_pktmbuf_is_contiguous(sym_op->m_dst)));

	dst = sym_op->m_dst ?
			rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
					sym_op->cipher.data.offset) :
			rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
					sym_op->cipher.data.offset);

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	/*
	 * GCM working in 12B IV mode => 16B pre-counter block; the BE LSB
	 * must be set to 1, and the driver expects 16B to be allocated.
	 */
	if (session->iv.length == 12) {
		uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
		*iv_padd = rte_bswap32(1);
	}
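
	/*
	 * This matches the J0 construction in NIST SP 800-38D: with a 96-bit
	 * IV the initial counter block is IV || 0^31 || 1, i.e. a big-endian
	 * 1 in the last four bytes.
	 */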

	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
		aesni_gcm_enc[session->key].init(&session->gdata,
				iv_ptr, sym_op->auth.aad.data,
				(uint64_t)session->aad_length);

		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
				(uint64_t)part_len);
		total_len = sym_op->cipher.data.length - part_len;

		/* Feed the remaining source segments to the GCM update */
		while (total_len) {
			dst += part_len;
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);
			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			aesni_gcm_enc[session->key].update(&session->gdata,
					dst, src, (uint64_t)part_len);
			total_len -= part_len;
		}

		aesni_gcm_enc[session->key].finalize(&session->gdata,
				sym_op->auth.digest.data,
				(uint64_t)session->digest_length);
	} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
		/* Append scratch space for the computed tag; it is compared
		 * against the supplied digest at post-process time */
		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src,
				session->digest_length);

		if (!auth_tag) {
			GCM_LOG_ERR("Failed to append room for the auth tag");
			return -1;
		}

		aesni_gcm_dec[session->key].init(&session->gdata,
				iv_ptr, sym_op->auth.aad.data,
				(uint64_t)session->aad_length);

		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
				(uint64_t)part_len);
		total_len = sym_op->cipher.data.length - part_len;

		while (total_len) {
			dst += part_len;
			m_src = m_src->next;
			RTE_ASSERT(m_src != NULL);
			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			aesni_gcm_dec[session->key].update(&session->gdata,
					dst, src, (uint64_t)part_len);
			total_len -= part_len;
		}

		aesni_gcm_dec[session->key].finalize(&session->gdata,
				auth_tag,
				(uint64_t)session->digest_length);
	}

	return 0;
}
/**
 * Post-process a completed GCM operation: set the op status and, for
 * decryption, verify the computed tag against the supplied digest.
 *
 * @param op	completed symmetric crypto operation
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	struct aesni_gcm_session *session =
		(struct aesni_gcm_session *)op->sym->session->_private;

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Verify digest if required */
	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
				m->data_len - session->digest_length);

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
		rte_hexdump(stdout, "auth tag (orig):",
				op->sym->auth.digest.data, session->digest_length);
		rte_hexdump(stdout, "auth tag (calc):",
				tag, session->digest_length);
#endif

		if (memcmp(tag, op->sym->auth.digest.data,
				session->digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

		/* trim the scratch area used for the computed tag */
		rte_pktmbuf_trim(m, session->digest_length);
	}
}
/**
 * Finish a completed GCM request: post-process it and release the
 * temporary session if the op was session-less.
 *
 * @param qp	queue pair the op was processed on
 * @param op	completed symmetric crypto operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
		struct rte_crypto_op *op)
{
	post_process_gcm_crypto_op(op);

	/* Free session if a session-less crypto op */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}
}
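
/*
 * Note the processing model of this PMD: enqueue_burst only places ops on
 * the queue pair's ring, and all GCM work is done synchronously here in
 * the dequeue path, so ops are processed as they are dequeued.
 */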

static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_session *sess;
	struct aesni_gcm_qp *qp = queue_pair;
	int retval = 0;
	unsigned int i, nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		sess = aesni_gcm_get_session(qp, ops[i]);
		if (unlikely(sess == NULL)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		retval = process_gcm_crypto_op(ops[i], sess);
		if (retval < 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			qp->qp_stats.dequeue_err_count++;
			break;
		}

		handle_completed_gcm_crypto_op(qp, ops[i]);
	}

	qp->qp_stats.dequeued_count += i;

	return i;
}

static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct aesni_gcm_qp *qp = queue_pair;
	unsigned int nb_enqueued;

	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

static int aesni_gcm_remove(struct rte_vdev_device *vdev);

static int
aesni_gcm_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct aesni_gcm_private *internals;

	if (init_params->name[0] == '\0')
		snprintf(init_params->name, sizeof(init_params->name),
				"%s", name);

	/* Check CPU for support for AES instruction set */
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
		GCM_LOG_ERR("AES instructions not supported by CPU");
		return -EFAULT;
	}

	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
			sizeof(struct aesni_gcm_private), init_params->socket_id,
			vdev);
	if (dev == NULL) {
		GCM_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
	dev->dev_ops = rte_aesni_gcm_pmd_ops;

	/* register rx/tx burst functions for data path */
	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_CPU_AESNI |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = dev->data->dev_private;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;

init_error:
	GCM_LOG_ERR("driver %s: create failed", init_params->name);
	aesni_gcm_remove(vdev);
	return -EFAULT;
}

static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
	struct rte_crypto_vdev_init_params init_params = {
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
		rte_socket_id(),
		{0}
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	if (init_params.name[0] != '\0')
		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
				init_params.name);
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return aesni_gcm_create(name, vdev, &init_params);
}

static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);

	if (name == NULL)
		return -EINVAL;

	GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}

static struct rte_vdev_driver aesni_gcm_pmd_drv = {
	.probe = aesni_gcm_probe,
	.remove = aesni_gcm_remove
};

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");