/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>

#include "aesni_gcm_pmd_private.h"
/** GCM encode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_enc[] = {
        [AESNI_GCM_KEY_128] = {
                aesni_gcm128_init,
                aesni_gcm128_enc_update,
                aesni_gcm128_enc_finalize
        },
        [AESNI_GCM_KEY_256] = {
                aesni_gcm256_init,
                aesni_gcm256_enc_update,
                aesni_gcm256_enc_finalize
        }
};

/** GCM decode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_dec[] = {
        [AESNI_GCM_KEY_128] = {
                aesni_gcm128_init,
                aesni_gcm128_dec_update,
                aesni_gcm128_dec_finalize
        },
        [AESNI_GCM_KEY_256] = {
                aesni_gcm256_init,
                aesni_gcm256_dec_update,
                aesni_gcm256_dec_finalize
        }
};
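
/*
 * Both tables are indexed by the key size recorded in the session at
 * setup time; a minimal dispatch sketch (using the names of the data
 * path below):
 *
 *	aesni_gcm_enc[session->key].update(&session->gdata, dst, src, len);
 */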
/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
        const struct rte_crypto_sym_xform *xform)
{
    const struct rte_crypto_sym_xform *auth_xform;
    const struct rte_crypto_sym_xform *cipher_xform;

    if (xform->next == NULL || xform->next->next != NULL) {
        GCM_LOG_ERR("Two and only two chained xform required");
        return -EINVAL;
    }

    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
        auth_xform = xform->next;
        cipher_xform = xform;
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
        auth_xform = xform;
        cipher_xform = xform->next;
    } else {
        GCM_LOG_ERR("Cipher and auth xform required");
        return -EINVAL;
    }

    if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
            (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
            auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
        GCM_LOG_ERR("We only support AES GCM and AES GMAC");
        return -EINVAL;
    }

    /* Select Crypto operation */
    if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
            auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
        sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
    else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
            auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
        sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
    else {
        GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
                " Decrypt/Verify are valid only");
        return -EINVAL;
    }

    /* Check key length and calculate GCM pre-compute. */
    switch (cipher_xform->cipher.key.length) {
    case 16:
        aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
        sess->key = AESNI_GCM_KEY_128;
        break;
    case 32:
        aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
        sess->key = AESNI_GCM_KEY_256;
        break;
    default:
        GCM_LOG_ERR("Unsupported cipher key length");
        return -EINVAL;
    }

    return 0;
}
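
/*
 * Illustrative sketch (not part of this driver): an application would
 * typically feed the function above a two-element chain such as the one
 * below. The key/digest lengths are assumptions for illustration and
 * key_data is a placeholder for the caller's key buffer.
 *
 *	struct rte_crypto_sym_xform auth_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_AES_GCM,
 *			.digest_length = 16,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xform,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
 *			.key = { .data = key_data, .length = 16 },
 *		},
 *	};
 */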
/** Get gcm session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
{
    struct aesni_gcm_session *sess = NULL;

    if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
        if (unlikely(op->session->dev_type
                != RTE_CRYPTODEV_AESNI_GCM_PMD))
            return sess;

        sess = (struct aesni_gcm_session *)op->session->_private;
    } else {
        void *_sess;

        if (rte_mempool_get(qp->sess_mp, &_sess))
            return sess;

        sess = (struct aesni_gcm_session *)
            ((struct rte_cryptodev_sym_session *)_sess)->_private;

        if (unlikely(aesni_gcm_set_session_parameters(sess,
                op->xform) != 0)) {
            rte_mempool_put(qp->sess_mp, _sess);
            sess = NULL;
        }
    }
    return sess;
}
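
/*
 * Usage sketch (assuming the session API of this DPDK generation, where
 * sessions are created per device): creating the session up front avoids
 * the per-op mempool get/set path above.
 *
 *	struct rte_cryptodev_sym_session *s =
 *		rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
 *	rte_crypto_op_attach_sym_session(op, s);
 */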
/**
 * Process a crypto operation, running the GCM init/update/finalize
 * primitives over the (possibly segmented) source buffer.
 *
 * @param	op	symmetric crypto operation
 * @param	session	GCM session
 *
 * @return
 * - 0 on success
 * - -1 on invalid IV or digest length, or if no tailroom for the auth tag
 */
static int
process_gcm_crypto_op(struct rte_crypto_sym_op *op,
        struct aesni_gcm_session *session)
{
    uint8_t *src, *dst;
    struct rte_mbuf *m_src = op->m_src;
    uint32_t offset = op->cipher.data.offset;
    uint32_t part_len, total_len, data_len;
    RTE_ASSERT(m_src != NULL);

    /* Walk the segment chain to the segment containing the data offset */
    while (offset >= m_src->data_len) {
        offset -= m_src->data_len;
        m_src = m_src->next;

        RTE_ASSERT(m_src != NULL);
    }

    data_len = m_src->data_len - offset;
    part_len = (data_len < op->cipher.data.length) ? data_len :
            op->cipher.data.length;

    /* Destination buffer is required when segmented source buffer */
    RTE_ASSERT((part_len == op->cipher.data.length) ||
            ((part_len != op->cipher.data.length) &&
                    (op->m_dst != NULL)));
    /* Segmented destination buffer is not supported */
    RTE_ASSERT((op->m_dst == NULL) ||
            ((op->m_dst != NULL) &&
                    rte_pktmbuf_is_contiguous(op->m_dst)));

    dst = op->m_dst ?
            rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
                    op->cipher.data.offset) :
            rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
                    op->cipher.data.offset);

    src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
    /* sanity checks */
    if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
            op->cipher.iv.length != 0) {
        GCM_LOG_ERR("Unsupported IV length");
        return -1;
    }

    /*
     * When GCM is given a 12B IV, the low-level driver builds a 16B
     * pre-counter block from it and expects all 16B to be allocated;
     * set the last 4B to a big-endian 1.
     */
    if (op->cipher.iv.length == 12) {
        uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
        *iv_padd = rte_bswap32(1);
    }
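
    /*
     * Resulting pre-counter block for a 96-bit IV, as defined for GCM
     * (J0 = IV || 0x00000001), big-endian:
     *
     *	byte:	0 ............. 11 | 12 13 14 15
     *	value:	<12-byte IV>       | 00 00 00 01
     */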
    if (op->auth.digest.length != 16 &&
            op->auth.digest.length != 12 &&
            op->auth.digest.length != 8) {
        GCM_LOG_ERR("Unsupported digest length");
        return -1;
    }
    if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
        aesni_gcm_enc[session->key].init(&session->gdata,
                op->cipher.iv.data,
                op->auth.aad.data,
                (uint64_t)op->auth.aad.length);

        aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
                (uint64_t)part_len);
        total_len = op->cipher.data.length - part_len;

        while (total_len) {
            dst += part_len;
            m_src = m_src->next;
            RTE_ASSERT(m_src != NULL);
            src = rte_pktmbuf_mtod(m_src, uint8_t *);
            part_len = (m_src->data_len < total_len) ?
                    m_src->data_len : total_len;
            aesni_gcm_enc[session->key].update(&session->gdata,
                    dst, src, (uint64_t)part_len);
            total_len -= part_len;
        }

        aesni_gcm_enc[session->key].finalize(&session->gdata,
                op->auth.digest.data,
                (uint64_t)op->auth.digest.length);
    } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
        uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
                op->m_dst : op->m_src,
                op->auth.digest.length);
        if (!auth_tag) {
            GCM_LOG_ERR("Failed to append room for auth tag");
            return -1;
        }

        aesni_gcm_dec[session->key].init(&session->gdata,
                op->cipher.iv.data,
                op->auth.aad.data,
                (uint64_t)op->auth.aad.length);

        aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
                (uint64_t)part_len);
        total_len = op->cipher.data.length - part_len;

        while (total_len) {
            dst += part_len;
            m_src = m_src->next;
            RTE_ASSERT(m_src != NULL);
            src = rte_pktmbuf_mtod(m_src, uint8_t *);
            part_len = (m_src->data_len < total_len) ?
                    m_src->data_len : total_len;
            aesni_gcm_dec[session->key].update(&session->gdata,
                    dst, src, (uint64_t)part_len);
            total_len -= part_len;
        }

        aesni_gcm_dec[session->key].finalize(&session->gdata,
                auth_tag,
                (uint64_t)op->auth.digest.length);
    }

    return 0;
}
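
/*
 * Sketch of the per-op fields consumed above (illustrative; mbuf and
 * mempool setup omitted, pointer/length values are placeholders):
 *
 *	op->sym->m_src = m;
 *	op->sym->cipher.data.offset = 0;
 *	op->sym->cipher.data.length = plaintext_len;
 *	op->sym->cipher.iv.data = iv;		// 16B allocated, 12B used
 *	op->sym->cipher.iv.length = 12;
 *	op->sym->auth.aad.data = aad;
 *	op->sym->auth.aad.length = aad_len;
 *	op->sym->auth.digest.data = digest;
 *	op->sym->auth.digest.length = 16;	// 8, 12 or 16 accepted above
 */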
/**
 * Post-process a completed crypto operation: set its status and, for an
 * authenticated decryption, verify the computed tag against the digest
 * supplied with the operation.
 *
 * @param op	completed crypto operation to post-process
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
    struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

    struct aesni_gcm_session *session =
            (struct aesni_gcm_session *)op->sym->session->_private;
    op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

    /* Verify digest if required */
    if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {

        uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
                m->data_len - op->sym->auth.digest.length);

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
        rte_hexdump(stdout, "auth tag (orig):",
                op->sym->auth.digest.data,
                op->sym->auth.digest.length);
        rte_hexdump(stdout, "auth tag (calc):",
                tag, op->sym->auth.digest.length);
#endif

        if (memcmp(tag, op->sym->auth.digest.data,
                op->sym->auth.digest.length) != 0)
            op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

        /* trim area used for digest from mbuf */
        rte_pktmbuf_trim(m, op->sym->auth.digest.length);
    }
}
/**
 * Complete a GCM request: post-process the operation and, if it was
 * session-less, release the session back to its mempool.
 *
 * @param qp	queue pair the operation was processed on
 * @param op	completed crypto operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
        struct rte_crypto_op *op)
{
    post_process_gcm_crypto_op(op);

    /* Free session if a session-less crypto op */
    if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
        rte_mempool_put(qp->sess_mp, op->sym->session);
        op->sym->session = NULL;
    }
}
static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
        struct rte_crypto_op **ops, uint16_t nb_ops)
{
    struct aesni_gcm_session *sess;
    struct aesni_gcm_qp *qp = queue_pair;
    int retval = 0;
    unsigned int i, nb_dequeued;

    nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
            (void **)ops, nb_ops, NULL);

    for (i = 0; i < nb_dequeued; i++) {
        sess = aesni_gcm_get_session(qp, ops[i]->sym);
        if (unlikely(sess == NULL)) {
            ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
            qp->qp_stats.dequeue_err_count++;
            break;
        }

        retval = process_gcm_crypto_op(ops[i]->sym, sess);
        if (retval < 0) {
            ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
            qp->qp_stats.dequeue_err_count++;
            break;
        }

        handle_completed_gcm_crypto_op(qp, ops[i]);
    }

    qp->qp_stats.dequeued_count += i;

    return i;
}
static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
        struct rte_crypto_op **ops, uint16_t nb_ops)
{
    struct aesni_gcm_qp *qp = queue_pair;
    unsigned int nb_enqueued;

    nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
            (void **)ops, nb_ops, NULL);
    qp->qp_stats.enqueued_count += nb_enqueued;

    return nb_enqueued;
}
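
/*
 * Data-path note with a usage sketch (illustrative): in this PMD the
 * enqueue path only stages ops on the ring; the actual GCM processing
 * happens in the dequeue path above. An application drives both through
 * the standard burst API:
 *
 *	uint16_t n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *			ops, nb_ops);
 *	...
 *	uint16_t n_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *			ops, nb_ops);
 */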
static int aesni_gcm_remove(struct rte_vdev_device *vdev);

static int
aesni_gcm_create(const char *name,
        struct rte_vdev_device *vdev,
        struct rte_crypto_vdev_init_params *init_params)
{
    struct rte_cryptodev *dev;
    struct aesni_gcm_private *internals;

    if (init_params->name[0] == '\0')
        snprintf(init_params->name, sizeof(init_params->name),
                "%s", name);

    /* Check CPU for support for AES instruction set */
    if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
        GCM_LOG_ERR("AES instructions not supported by CPU");
        return -EFAULT;
    }

    dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
            sizeof(struct aesni_gcm_private), init_params->socket_id);
    if (dev == NULL) {
        GCM_LOG_ERR("failed to create cryptodev vdev");
        goto init_error;
    }

    dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
    dev->dev_ops = rte_aesni_gcm_pmd_ops;

    /* register rx/tx burst functions for data path */
    dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
    dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

    dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
            RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
            RTE_CRYPTODEV_FF_CPU_AESNI |
            RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

    internals = dev->data->dev_private;

    internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
    internals->max_nb_sessions = init_params->max_nb_sessions;

    return 0;

init_error:
    GCM_LOG_ERR("driver %s: create failed", init_params->name);

    aesni_gcm_remove(vdev);
    return -EFAULT;
}
static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
    struct rte_crypto_vdev_init_params init_params = {
        RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
        RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
        rte_socket_id(),
        {0}
    };
    const char *name;
    const char *input_args;

    name = rte_vdev_device_name(vdev);
    if (name == NULL)
        return -EINVAL;
    input_args = rte_vdev_device_args(vdev);
    rte_cryptodev_parse_vdev_init_params(&init_params, input_args);

    RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
            init_params.socket_id);
    if (init_params.name[0] != '\0')
        RTE_LOG(INFO, PMD, "  User defined name = %s\n",
                init_params.name);
    RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
            init_params.max_nb_queue_pairs);
    RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
            init_params.max_nb_sessions);

    return aesni_gcm_create(name, vdev, &init_params);
}
static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
    const char *name;

    name = rte_vdev_device_name(vdev);
    if (name == NULL)
        return -EINVAL;

    GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
            name, rte_socket_id());

    return 0;
}
static struct rte_vdev_driver aesni_gcm_pmd_drv = {
    .probe = aesni_gcm_probe,
    .remove = aesni_gcm_remove
};

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
    "max_nb_queue_pairs=<int> "
    "max_nb_sessions=<int> "
    "socket_id=<int>");