/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_dev.h>		/* struct rte_driver, PMD_REGISTER_DRIVER */
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_kvargs.h>

#include "rte_kasumi_pmd_private.h"
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
#define KASUMI_DIGEST_LENGTH 4
#define KASUMI_MAX_BURST 4
#define BYTE_LEN 8
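
/*
 * Note on the sizes above: KASUMI (3GPP F8/F9, i.e. UEA1/UIA1) uses a
 * 128-bit cipher/integrity key, a 64-bit IV built from COUNT/BEARER/
 * DIRECTION, and produces a 4-byte MAC-I digest. KASUMI_MAX_BURST bounds
 * how many same-session operations are handed to the multi-buffer cipher
 * call at once, and BYTE_LEN is simply the number of bits per byte, used
 * for the byte-alignment checks below.
 */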
/**
 * Global static parameter used to create a unique name for each KASUMI
 * crypto device.
 */
static unsigned unique_name_id;
static inline int
create_unique_device_name(char *name, size_t size)
{
	int ret;

	if (name == NULL)
		return -EINVAL;

	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_KASUMI_PMD,
			unique_name_id++);
	if (ret < 0)
		return ret;
	return 0;
}
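
/*
 * The generated names are of the form "<driver name>_<id>", typically
 * "cryptodev_kasumi_pmd_0", "cryptodev_kasumi_pmd_1" and so on, one per
 * virtual device instance.
 */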
/** Get xform chain order. */
static enum kasumi_operation
kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return KASUMI_OP_NOT_SUPPORTED;

	if (xform->next)
		if (xform->next->next != NULL)
			return KASUMI_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return KASUMI_OP_ONLY_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return KASUMI_OP_AUTH_CIPHER;
		else
			return KASUMI_OP_NOT_SUPPORTED;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return KASUMI_OP_ONLY_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return KASUMI_OP_CIPHER_AUTH;
		else
			return KASUMI_OP_NOT_SUPPORTED;
	}

	return KASUMI_OP_NOT_SUPPORTED;
}
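
/*
 * For reference, a chain of at most two xforms is accepted. For example, an
 * application that wants encrypt-then-authenticate would link a cipher xform
 * to an auth xform:
 *
 *	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	cipher_xform.next = &auth_xform;
 *	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	auth_xform.next = NULL;
 *
 * which kasumi_get_mode() classifies as KASUMI_OP_CIPHER_AUTH. A single
 * unchained xform maps to KASUMI_OP_ONLY_CIPHER or KASUMI_OP_ONLY_AUTH.
 */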
/** Parse crypto xform chain and set private session parameters. */
int
kasumi_set_session_parameters(struct kasumi_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum kasumi_operation mode;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	mode = kasumi_get_mode(xform);

	switch (mode) {
	case KASUMI_OP_CIPHER_AUTH:
		auth_xform = xform->next;
		/* Fall-through */
	case KASUMI_OP_ONLY_CIPHER:
		cipher_xform = xform;
		break;
	case KASUMI_OP_AUTH_CIPHER:
		cipher_xform = xform->next;
		/* Fall-through */
	case KASUMI_OP_ONLY_AUTH:
		auth_xform = xform;
		break;
	case KASUMI_OP_NOT_SUPPORTED:
	default:
		break;
	}

	if (mode == KASUMI_OP_NOT_SUPPORTED) {
		KASUMI_LOG_ERR("Unsupported operation chain order parameter");
		return -EINVAL;
	}

	if (cipher_xform) {
		/* Only KASUMI F8 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
			return -EINVAL;
		/* Initialize cipher key schedule. */
		sso_kasumi_init_f8_key_sched(cipher_xform->cipher.key.data,
				&sess->pKeySched_cipher);
	}

	if (auth_xform) {
		/* Only KASUMI F9 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
			return -EINVAL;
		sess->auth_op = auth_xform->auth.op;
		/* Initialize hash key schedule. */
		sso_kasumi_init_f9_key_sched(auth_xform->auth.key.data,
				&sess->pKeySched_hash);
	}

	sess->op = mode;

	return 0;
}
/** Get KASUMI session. */
static struct kasumi_session *
kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
{
	struct kasumi_session *sess;

	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
		if (unlikely(op->sym->session->dev_type !=
				RTE_CRYPTODEV_KASUMI_PMD))
			return NULL;

		sess = (struct kasumi_session *)op->sym->session->_private;
	} else {
		struct rte_cryptodev_session *c_sess = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
			return NULL;

		sess = (struct kasumi_session *)c_sess->_private;

		if (unlikely(kasumi_set_session_parameters(sess,
				op->sym->xform) != 0))
			return NULL;
	}

	return sess;
}
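
/*
 * Note: for session-less operations the session object is borrowed from the
 * queue pair's session mempool and initialised from op->sym->xform above; it
 * is returned to the pool once the operation has been processed (see
 * process_ops()/process_op_bit() below).
 */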
/** Encrypt/decrypt mbufs with same cipher key. */
static uint8_t
process_kasumi_cipher_op(struct rte_crypto_op **ops,
		struct kasumi_session *session,
		uint8_t num_ops)
{
	unsigned i;
	uint8_t processed_ops = 0;
	uint8_t *src[num_ops], *dst[num_ops];
	uint64_t IV[num_ops];
	uint32_t num_bytes[num_ops];

	for (i = 0; i < num_ops; i++) {
		/* Sanity checks. */
		if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			KASUMI_LOG_ERR("iv");
			break;
		}

		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		dst[i] = ops[i]->sym->m_dst ?
			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3) :
			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		IV[i] = *((uint64_t *)(ops[i]->sym->cipher.iv.data));
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

		processed_ops++;
	}

	if (processed_ops != 0)
		sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, IV,
			src, dst, num_bytes, processed_ops);

	return processed_ops;
}
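
/*
 * Note: cipher offsets and lengths are expressed in bits by the cryptodev
 * API, so the ">> 3" above converts them to bytes; this byte-level path is
 * only taken for operations whose length and offset are byte-aligned (see
 * kasumi_pmd_enqueue_burst()). All gathered buffers share one key schedule,
 * which is what allows the single multi-buffer F8 call.
 */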
/** Encrypt/decrypt mbuf (bit level function). */
static uint8_t
process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
		struct kasumi_session *session)
{
	uint8_t *src, *dst;
	uint64_t IV;
	uint32_t length_in_bits, offset_in_bits;

	/* Sanity checks. */
	if (unlikely(op->sym->cipher.iv.length != KASUMI_IV_LENGTH)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		KASUMI_LOG_ERR("iv");
		return 0;
	}

	offset_in_bits = op->sym->cipher.data.offset;
	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	dst = op->sym->m_dst ?
		rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *) :
		rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	IV = *((uint64_t *)(op->sym->cipher.iv.data));
	length_in_bits = op->sym->cipher.data.length;

	sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
			src, dst, length_in_bits, offset_in_bits);

	return 1;
}
/** Generate/verify hash from mbufs with same hash key. */
static int
process_kasumi_hash_op(struct rte_crypto_op **ops,
		struct kasumi_session *session,
		uint8_t num_ops)
{
	unsigned i;
	uint8_t processed_ops = 0;
	uint8_t *src, *dst;
	uint32_t length_in_bits;
	uint32_t num_bytes;
	uint32_t shift_bits;
	uint64_t IV;
	uint8_t direction;

	for (i = 0; i < num_ops; i++) {
		if (unlikely(ops[i]->sym->auth.aad.length != KASUMI_IV_LENGTH)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			KASUMI_LOG_ERR("aad");
			break;
		}

		if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			KASUMI_LOG_ERR("digest");
			break;
		}

		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			KASUMI_LOG_ERR("offset");
			break;
		}

		length_in_bits = ops[i]->sym->auth.data.length;

		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->auth.data.offset >> 3);
		/* IV from AAD */
		IV = *((uint64_t *)(ops[i]->sym->auth.aad.data));
		/* Direction from next bit after end of message */
		num_bytes = (length_in_bits >> 3) + 1;
		shift_bits = (BYTE_LEN - 1 - length_in_bits) % BYTE_LEN;
		direction = (src[num_bytes - 1] >> shift_bits) & 0x01;

		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
					ops[i]->sym->auth.digest.length);

			sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
					IV, src,
					length_in_bits, dst, direction);
			/* Verify digest. */
			if (memcmp(dst, ops[i]->sym->auth.digest.data,
					ops[i]->sym->auth.digest.length) != 0)
				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

			/* Trim area used for digest from mbuf. */
			rte_pktmbuf_trim(ops[i]->sym->m_src,
					ops[i]->sym->auth.digest.length);
		} else {
			dst = ops[i]->sym->auth.digest.data;

			sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
					IV, src,
					length_in_bits, dst, direction);
		}
		processed_ops++;
	}

	return processed_ops;
}
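
/*
 * Note on the DIRECTION bit above: for KASUMI F9 (UIA1) the direction bit
 * occupies the bit immediately following the message in the source buffer,
 * so it is read back from the last (partially used) byte. The arithmetic is
 * done on unsigned values, so shift_bits effectively computes
 * (7 - length_in_bits) mod 8. For example, with a 40-bit message:
 * num_bytes = (40 >> 3) + 1 = 6 and shift_bits = 7, so the direction bit is
 * the most significant bit of src[5], i.e. the first bit after the message.
 */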
/** Process a batch of crypto ops which share the same session. */
static int
process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
		struct kasumi_qp *qp, uint8_t num_ops,
		uint16_t *accumulated_enqueued_ops)
{
	unsigned i;
	unsigned enqueued_ops, processed_ops;

	switch (session->op) {
	case KASUMI_OP_ONLY_CIPHER:
		processed_ops = process_kasumi_cipher_op(ops,
				session, num_ops);
		break;
	case KASUMI_OP_ONLY_AUTH:
		processed_ops = process_kasumi_hash_op(ops, session,
				num_ops);
		break;
	case KASUMI_OP_CIPHER_AUTH:
		processed_ops = process_kasumi_cipher_op(ops, session,
				num_ops);
		process_kasumi_hash_op(ops, session, processed_ops);
		break;
	case KASUMI_OP_AUTH_CIPHER:
		processed_ops = process_kasumi_hash_op(ops, session,
				num_ops);
		process_kasumi_cipher_op(ops, session, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}

	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
			(void **)ops, processed_ops);
	qp->qp_stats.enqueued_count += enqueued_ops;
	*accumulated_enqueued_ops += enqueued_ops;

	return enqueued_ops;
}
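
/*
 * Note: in the chained modes the second stage is only run on the
 * processed_ops accepted by the first stage, and any per-op error status set
 * there (e.g. RTE_CRYPTO_OP_STATUS_INVALID_ARGS) is preserved, since only
 * ops still marked NOT_PROCESSED are promoted to SUCCESS in the loop above.
 */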
/** Process a crypto op with length/offset in bits. */
static int
process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
		struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
{
	unsigned enqueued_op, processed_op;

	switch (session->op) {
	case KASUMI_OP_ONLY_CIPHER:
		processed_op = process_kasumi_cipher_op_bit(op,
				session);
		break;
	case KASUMI_OP_ONLY_AUTH:
		processed_op = process_kasumi_hash_op(&op, session, 1);
		break;
	case KASUMI_OP_CIPHER_AUTH:
		processed_op = process_kasumi_cipher_op_bit(op, session);
		if (processed_op == 1)
			process_kasumi_hash_op(&op, session, 1);
		break;
	case KASUMI_OP_AUTH_CIPHER:
		processed_op = process_kasumi_hash_op(&op, session, 1);
		if (processed_op == 1)
			process_kasumi_cipher_op_bit(op, session);
		break;
	default:
		/* Operation not supported. */
		processed_op = 0;
	}

	/*
	 * If there was no error/authentication failure,
	 * change status to successful.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Free session if a session-less crypto op. */
	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
			processed_op);
	qp->qp_stats.enqueued_count += enqueued_op;
	*accumulated_enqueued_ops += enqueued_op;

	return enqueued_op;
}
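
/*
 * Note: bit-level operations bypass the multi-buffer batching entirely; each
 * one is processed on its own and enqueued to the processed_ops ring
 * individually, which is why the enqueue above passes processed_op (0 or 1)
 * as the burst size.
 */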
static uint16_t
kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_crypto_op *c_ops[nb_ops];
	struct rte_crypto_op *curr_c_op;

	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
	struct kasumi_qp *qp = queue_pair;
	unsigned i;
	uint8_t burst_size = 0;
	uint16_t enqueued_ops = 0;
	uint8_t processed_ops;

	for (i = 0; i < nb_ops; i++) {
		curr_c_op = ops[i];

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = kasumi_get_session(qp, curr_c_op);
		if (unlikely(curr_sess == NULL ||
				curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level, process this buffer alone. */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((ops[i]->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			/* Process the ops of the previous session. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size, &enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}
				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op, curr_sess,
					qp, &enqueued_ops);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == KASUMI_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size, &enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}
				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess,
					qp, burst_size, &enqueued_ops);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}
			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess,
				qp, burst_size, &enqueued_ops);
	}

	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
	return enqueued_ops;
}
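
/*
 * Note: the enqueue path batches up to KASUMI_MAX_BURST byte-aligned ops
 * that share a session so they can go through the multi-buffer F8/F9 calls,
 * flushing the pending batch whenever the session changes, the batch is
 * full, or a bit-level op has to be handled on its own. Ops that could not
 * be placed on the processed_ops ring are counted as enqueue errors.
 */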
static uint16_t
kasumi_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **c_ops, uint16_t nb_ops)
{
	struct kasumi_qp *qp = queue_pair;

	unsigned nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
			(void **)c_ops, nb_ops);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
static int cryptodev_kasumi_uninit(const char *name);
static int
cryptodev_kasumi_create(const char *name,
		struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct kasumi_private *internals;
	uint64_t cpu_flags = 0;

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
		cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
	else {
		KASUMI_LOG_ERR("Vector instructions are not supported by CPU");
		return -EFAULT;
	}

	/* Create a unique device name. */
	if (create_unique_device_name(crypto_dev_name,
			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
		KASUMI_LOG_ERR("failed to create unique cryptodev name");
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
			sizeof(struct kasumi_private), init_params->socket_id);
	if (dev == NULL) {
		KASUMI_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->dev_type = RTE_CRYPTODEV_KASUMI_PMD;
	dev->dev_ops = rte_kasumi_pmd_ops;

	/* Register RX/TX burst functions for data path. */
	dev->dequeue_burst = kasumi_pmd_dequeue_burst;
	dev->enqueue_burst = kasumi_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			cpu_flags;

	internals = dev->data->dev_private;

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;
init_error:
	KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", name);

	cryptodev_kasumi_uninit(crypto_dev_name);
	return -EFAULT;
}
static int
cryptodev_kasumi_init(const char *name,
		const char *input_args)
{
	struct rte_crypto_vdev_init_params init_params = {
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
		rte_socket_id()
	};

	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return cryptodev_kasumi_create(name, &init_params);
}
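
/*
 * For reference, the defaults above can be overridden through the vdev
 * argument string passed by the application/EAL, typically something like:
 *
 *	--vdev="cryptodev_kasumi_pmd,max_nb_queue_pairs=2,\
 *	max_nb_sessions=128,socket_id=0"
 *
 * The exact key names accepted are those parsed by
 * rte_cryptodev_parse_vdev_init_params().
 */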
static int
cryptodev_kasumi_uninit(const char *name)
{
	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing KASUMI crypto device %s"
			" on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}
static struct rte_driver cryptodev_kasumi_pmd_drv = {
	.name = CRYPTODEV_NAME_KASUMI_PMD,
	.type = PMD_VDEV,
	.init = cryptodev_kasumi_init,
	.uninit = cryptodev_kasumi_uninit
};

PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv);