aesni_gcm: add driver for AES-GCM crypto operations
drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <openssl/aes.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "aesni_gcm_pmd_private.h"

/**
 * Global static parameter used to create a unique name for each AES-NI GCM
 * crypto device.
 */
static unsigned unique_name_id;

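/**
 * Create a unique cryptodev name of the form "<driver name>_<instance id>"
 * in the caller-supplied buffer.
 */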
static inline int
create_unique_device_name(char *name, size_t size)
{
        int ret;

        if (name == NULL)
                return -EINVAL;

        ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_GCM_PMD,
                        unique_name_id++);
        if (ret < 0)
                return ret;
        return 0;
}

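/**
 * Calculate the GCM hash subkey H = E_K(0^128): the caller passes in a
 * zeroed hsubkey buffer, which is AES-encrypted in place under the cipher
 * key.
 */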
static int
aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
                uint8_t *aeskey, unsigned aeskey_length)
{
        uint8_t key[aeskey_length] __rte_aligned(16);
        AES_KEY enc_key;

        /* both lengths must be a multiple of the AES block size */
        if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
                return -EFAULT;

        memcpy(key, aeskey, aeskey_length);

        if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
                return -EFAULT;

        AES_encrypt(hsubkey, hsubkey, &enc_key);

        return 0;
}

/** Get xform chain order */
static int
aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
{
        /*
         * GCM only supports authenticated encryption or authenticated
         * decryption, all other options are invalid, so we must have exactly
         * 2 xform structs chained together
         */
        if (xform->next == NULL || xform->next->next != NULL)
                return -1;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
        }

        return -1;
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
                struct aesni_gcm_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *auth_xform = NULL;
        const struct rte_crypto_sym_xform *cipher_xform = NULL;

        uint8_t hsubkey[16] __rte_aligned(16) = { 0 };

        /* Select Crypto operation - hash then cipher / cipher then hash */
        switch (aesni_gcm_get_mode(xform)) {
        case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
                sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;

                cipher_xform = xform;
                auth_xform = xform->next;
                break;
        case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
                sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;

                auth_xform = xform;
                cipher_xform = xform->next;
                break;
        default:
                GCM_LOG_ERR("Unsupported operation chain order parameter");
                return -EINVAL;
        }

        /* We only support AES GCM, so both transforms must use it */
        if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM ||
                        auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
                return -EINVAL;

        /* Select cipher direction */
        if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
                        cipher_xform->cipher.op !=
                                        RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
                                "(DECRYPT) specified are an invalid selection");
                return -EINVAL;
        } else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
                        cipher_xform->cipher.op !=
                                        RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
                                "(ENCRYPT) specified are an invalid selection");
                return -EINVAL;
        }

        /* Expand GCM AES128 key */
        (*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
                        sess->gdata.expanded_keys);

        /* Calculate the GCM hash subkey H = E_K(0^128) */
        if (aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
                        cipher_xform->cipher.key.data,
                        cipher_xform->cipher.key.length) != 0) {
                GCM_LOG_ERR("failed to calculate GCM hash sub key");
                return -EFAULT;
        }

        /* Calculate GCM pre-compute */
        (*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);

        return 0;
}

/** Get gcm session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
{
        struct aesni_gcm_session *sess = NULL;

        if (op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
                if (unlikely(op->session->type != RTE_CRYPTODEV_AESNI_GCM_PMD))
                        return sess;

                sess = (struct aesni_gcm_session *)op->session->_private;
        } else {
                void *_sess;

                if (rte_mempool_get(qp->sess_mp, &_sess))
                        return sess;

                sess = (struct aesni_gcm_session *)
                        ((struct rte_cryptodev_session *)_sess)->_private;

                if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
                                sess, op->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        sess = NULL;
                }
        }
        return sess;
}

/**
 * Process a crypto operation, calling the GCM library encrypt or decrypt
 * function according to the session's operation type.
 *
 * @param       qp              queue pair
 * @param       op              symmetric crypto operation
 * @param       session         GCM session
 *
 * @return
 * - 0 on success
 * - -1 on failure (invalid parameters or no room to append digest space)
 */
static int
process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
                struct aesni_gcm_session *session)
{
        uint8_t *src, *dst;
        struct rte_mbuf *m = op->m_src;

        src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
        dst = op->m_dst ?
                        rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
                                        op->cipher.data.offset) :
                        rte_pktmbuf_mtod_offset(m, uint8_t *,
                                        op->cipher.data.offset);

        /* sanity checks */
        if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
                GCM_LOG_ERR("unsupported IV length");
                return -1;
        }

        if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
                        op->auth.aad.length != 0) {
                GCM_LOG_ERR("unsupported AAD length");
                return -1;
        }

        if (op->auth.digest.length != 16 &&
                        op->auth.digest.length != 12 &&
                        op->auth.digest.length != 8 &&
                        op->auth.digest.length != 0) {
                GCM_LOG_ERR("unsupported digest length");
                return -1;
        }

        if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {

                (*qp->ops->gcm.enc)(&session->gdata, dst, src,
                                (uint64_t)op->cipher.data.length,
                                op->cipher.iv.data,
                                op->auth.aad.data,
                                (uint64_t)op->auth.aad.length,
                                op->auth.digest.data,
                                (uint64_t)op->auth.digest.length);
        } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
                /*
                 * Append scratch space to the mbuf to hold the computed tag;
                 * it is verified against the supplied digest and trimmed off
                 * again in post-processing.
                 */
                uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
                                op->auth.digest.length);

                if (!auth_tag) {
                        GCM_LOG_ERR("failed to append space for auth tag");
                        return -1;
                }

                (*qp->ops->gcm.dec)(&session->gdata, dst, src,
                                (uint64_t)op->cipher.data.length,
                                op->cipher.iv.data,
                                op->auth.aad.data,
                                (uint64_t)op->auth.aad.length,
                                auth_tag,
                                (uint64_t)op->auth.digest.length);
        } else {
                GCM_LOG_ERR("unsupported session operation");
                return -1;
        }

        return 0;
}

/**
 * Post-process a completed GCM crypto operation
 *
 * For authenticated decryption, compares the computed digest (appended to
 * the mbuf during processing) against the digest supplied in the operation,
 * sets the operation status accordingly and trims the scratch digest area
 * from the mbuf.
 *
 * @param op    symmetric crypto operation to post-process
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
        struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

        struct aesni_gcm_session *session =
                (struct aesni_gcm_session *)op->sym->session->_private;

        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

        /* Verify digest if required */
        if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {

                uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
                                m->data_len - op->sym->auth.digest.length);

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
                rte_hexdump(stdout, "auth tag (orig):",
                                op->sym->auth.digest.data,
                                op->sym->auth.digest.length);
                rte_hexdump(stdout, "auth tag (calc):",
                                tag, op->sym->auth.digest.length);
#endif

                if (memcmp(tag, op->sym->auth.digest.data,
                                op->sym->auth.digest.length) != 0)
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

                /* trim area used for digest from mbuf */
                rte_pktmbuf_trim(m, op->sym->auth.digest.length);
        }
}

/**
 * Complete a GCM crypto operation: post-process it, release any
 * session-less session back to its mempool, and enqueue the operation on
 * the queue pair's processed ring, ready for dequeue.
 *
 * @param qp    queue pair the operation was processed on
 * @param op    completed crypto operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
                struct rte_crypto_op *op)
{
        post_process_gcm_crypto_op(op);

        /* Free session if a session-less crypto op */
        if (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
                rte_mempool_put(qp->sess_mp, op->sym->session);
                op->sym->session = NULL;
        }

        rte_ring_enqueue(qp->processed_pkts, (void *)op);
}

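/**
 * Enqueue a burst of crypto operations: each operation is processed
 * synchronously in this software PMD and placed on the processed ring, so
 * it is available for dequeue immediately. Processing stops at the first
 * failing operation; the number of successfully processed operations is
 * returned.
 */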
static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct aesni_gcm_session *sess;
        struct aesni_gcm_qp *qp = queue_pair;

        int i, retval = 0;

        for (i = 0; i < nb_ops; i++) {

                sess = aesni_gcm_get_session(qp, ops[i]->sym);
                if (unlikely(sess == NULL)) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.enqueue_err_count++;
                        break;
                }

                retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
                if (retval < 0) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.enqueue_err_count++;
                        break;
                }

                handle_completed_gcm_crypto_op(qp, ops[i]);

                qp->qp_stats.enqueued_count++;
        }
        return i;
}

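/** Dequeue a burst of processed operations from the queue pair's ring */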
static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct aesni_gcm_qp *qp = queue_pair;

        unsigned nb_dequeued;

        nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
                        (void **)ops, nb_ops);
        qp->qp_stats.dequeued_count += nb_dequeued;

        return nb_dequeued;
}

static int aesni_gcm_uninit(const char *name);

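/**
 * Create an AES-NI GCM virtual crypto device: verify AES-NI support on the
 * CPU, select the best available vector mode (AVX2/AVX/SSE4.1) and register
 * the device together with its burst functions and private data.
 */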
static int
aesni_gcm_create(const char *name,
                struct rte_crypto_vdev_init_params *init_params)
{
        struct rte_cryptodev *dev;
        char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        struct aesni_gcm_private *internals;
        enum aesni_gcm_vector_mode vector_mode;

        /* Check CPU for support for AES instruction set */
        if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
                GCM_LOG_ERR("AES instructions not supported by CPU");
                return -EFAULT;
        }

        /* Check CPU for supported vector instruction set */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                vector_mode = RTE_AESNI_GCM_AVX2;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
                vector_mode = RTE_AESNI_GCM_AVX;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
                vector_mode = RTE_AESNI_GCM_SSE;
        else {
                GCM_LOG_ERR("Vector instructions are not supported by CPU");
                return -EFAULT;
        }

        /* create a unique device name */
        if (create_unique_device_name(crypto_dev_name,
                        RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
                GCM_LOG_ERR("failed to create unique cryptodev name");
                return -EINVAL;
        }

        dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
                        sizeof(struct aesni_gcm_private), init_params->socket_id);
        if (dev == NULL) {
                GCM_LOG_ERR("failed to create cryptodev vdev");
                goto init_error;
        }

        dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
        dev->dev_ops = rte_aesni_gcm_pmd_ops;

        /* register rx/tx burst functions for data path */
        dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
        dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

        /* Set vector instructions mode supported */
        internals = dev->data->dev_private;

        internals->vector_mode = vector_mode;

        internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
        internals->max_nb_sessions = init_params->max_nb_sessions;

        return 0;

init_error:
        GCM_LOG_ERR("driver %s: create failed", name);

        aesni_gcm_uninit(crypto_dev_name);
        return -EFAULT;
}

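/** Parse the vdev init parameters and create an AES-NI GCM device */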
static int
aesni_gcm_init(const char *name, const char *input_args)
{
        struct rte_crypto_vdev_init_params init_params = {
                RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
                RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
                rte_socket_id()
        };

        rte_cryptodev_parse_vdev_init_params(&init_params, input_args);

        RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
                        init_params.socket_id);
        RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
                        init_params.max_nb_queue_pairs);
        RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
                        init_params.max_nb_sessions);

        return aesni_gcm_create(name, &init_params);
}

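/** Uninitialise an AES-NI GCM virtual device */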
static int
aesni_gcm_uninit(const char *name)
{
        if (name == NULL)
                return -EINVAL;

        GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
                        name, rte_socket_id());

        return 0;
}

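/*
 * Virtual device driver registration; instances of this PMD can then be
 * created at EAL initialisation, e.g. via the --vdev EAL option using the
 * driver name given by CRYPTODEV_NAME_AESNI_GCM_PMD.
 */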
static struct rte_driver aesni_gcm_pmd_drv = {
        .name = CRYPTODEV_NAME_AESNI_GCM_PMD,
        .type = PMD_VDEV,
        .init = aesni_gcm_init,
        .uninit = aesni_gcm_uninit
};

PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv);