/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "kasumi_pmd_private.h"

#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
#define KASUMI_MAX_BURST 4
#define BYTE_LEN 8

static uint8_t cryptodev_driver_id;
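
/*
 * KASUMI F8 (ciphering) and F9 (integrity) are the 3GPP algorithms behind
 * RTE_CRYPTO_CIPHER_KASUMI_F8 and RTE_CRYPTO_AUTH_KASUMI_F9; this PMD drives
 * both through the intel-ipsec-mb IMB_KASUMI_* primitives via an MB_MGR.
 */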

/** Get xform chain order. */
static enum kasumi_operation
kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return KASUMI_OP_NOT_SUPPORTED;

	if (xform->next)
		if (xform->next->next != NULL)
			return KASUMI_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return KASUMI_OP_ONLY_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return KASUMI_OP_AUTH_CIPHER;
		else
			return KASUMI_OP_NOT_SUPPORTED;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return KASUMI_OP_ONLY_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return KASUMI_OP_CIPHER_AUTH;
		else
			return KASUMI_OP_NOT_SUPPORTED;
	}

	return KASUMI_OP_NOT_SUPPORTED;
}
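
/*
 * Illustrative two-element chain accepted above (names are hypothetical):
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH, .next = NULL };
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER, .next = &auth_xf };
 *
 * kasumi_get_mode(&cipher_xf) returns KASUMI_OP_CIPHER_AUTH; chains of more
 * than two xforms are rejected.
 */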

/** Parse crypto xform chain and set private session parameters. */
int
kasumi_set_session_parameters(MB_MGR *mgr, struct kasumi_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum kasumi_operation mode;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	mode = kasumi_get_mode(xform);

	switch (mode) {
	case KASUMI_OP_CIPHER_AUTH:
		auth_xform = xform->next;
		/* Fall-through */
	case KASUMI_OP_ONLY_CIPHER:
		cipher_xform = xform;
		break;
	case KASUMI_OP_AUTH_CIPHER:
		cipher_xform = xform->next;
		/* Fall-through */
	case KASUMI_OP_ONLY_AUTH:
		auth_xform = xform;
		break;
	case KASUMI_OP_NOT_SUPPORTED:
	default:
		KASUMI_LOG(ERR, "Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

	if (cipher_xform) {
		/* Only KASUMI F8 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
			KASUMI_LOG(ERR, "Unsupported cipher algorithm");
			return -ENOTSUP;
		}

		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
		if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
			KASUMI_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}

		/* Initialize key */
		IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
				&sess->pKeySched_cipher);
	}

	if (auth_xform) {
		/* Only KASUMI F9 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
			KASUMI_LOG(ERR, "Unsupported authentication");
			return -ENOTSUP;
		}

		if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
			KASUMI_LOG(ERR, "Wrong digest length");
			return -EINVAL;
		}

		sess->auth_op = auth_xform->auth.op;

		/* Initialize key */
		IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
				&sess->pKeySched_hash);
	}

	sess->op = mode;
	return 0;
}
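
/*
 * Both key schedules are expanded once at session setup, so the per-op paths
 * below only pass the precomputed pKeySched_cipher/pKeySched_hash to the
 * IMB_KASUMI_* calls. The 16-byte key size (KASUMI_KEY_LENGTH) is advertised
 * through the PMD capabilities rather than re-checked here.
 */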

/** Get KASUMI session. */
static struct kasumi_session *
kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
{
	struct kasumi_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct kasumi_session *)
					get_sym_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
	} else {
		void *_sess = NULL;
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))
			return NULL;

		sess = (struct kasumi_session *)_sess_private_data;

		if (unlikely(kasumi_set_session_parameters(qp->mgr, sess,
				op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
				cryptodev_driver_id, _sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}
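
/*
 * Session-less ops draw the generic session header from qp->sess_mp and the
 * driver-private area from qp->sess_mp_priv; both objects are handed back to
 * their pools once the op completes (see process_ops()).
 */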

/** Encrypt/decrypt mbufs with same cipher key. */
static uint8_t
process_kasumi_cipher_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
		struct kasumi_session *session, uint8_t num_ops)
{
	unsigned i;
	uint8_t processed_ops = 0;
	const void *src[num_ops];
	void *dst[num_ops];
	uint8_t *iv_ptr;
	uint64_t iv[num_ops];
	uint32_t num_bytes[num_ops];

	for (i = 0; i < num_ops; i++) {
		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		dst[i] = ops[i]->sym->m_dst ?
			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3) :
			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->cipher_iv_offset);
		iv[i] = *((uint64_t *)(iv_ptr));
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

		processed_ops++;
	}

	if (processed_ops != 0)
		IMB_KASUMI_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
			src, dst, num_bytes, processed_ops);

	return processed_ops;
}
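
/*
 * IMB_KASUMI_F8_N_BUFFER ciphers all collected buffers in a single
 * multi-buffer call. The enqueue path only batches ops whose cipher
 * offset/length are byte aligned; bit-level ops are routed to
 * process_kasumi_cipher_op_bit() instead.
 */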

/** Encrypt/decrypt mbuf (bit level function). */
static uint8_t
process_kasumi_cipher_op_bit(struct kasumi_qp *qp, struct rte_crypto_op *op,
		struct kasumi_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv_ptr;
	uint64_t iv;
	uint32_t length_in_bits, offset_in_bits;

	offset_in_bits = op->sym->cipher.data.offset;
	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	if (op->sym->m_dst == NULL)
		dst = src;
	else
		dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->cipher_iv_offset);
	iv = *((uint64_t *)(iv_ptr));
	length_in_bits = op->sym->cipher.data.length;

	IMB_KASUMI_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
			src, dst, length_in_bits, offset_in_bits);

	return 1;
}
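
/*
 * The single-buffer bit-level variant: IMB_KASUMI_F8_1_BUFFER_BIT takes the
 * offset and length in bits directly, so no >> 3 conversion is needed here.
 */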

/** Generate/verify hash from mbufs with same hash key. */
static int
process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
		struct kasumi_session *session,
		uint8_t num_ops)
{
	unsigned i;
	uint8_t processed_ops = 0;
	uint8_t *src, *dst;
	uint32_t length_in_bits;
	uint32_t num_bytes;

	for (i = 0; i < num_ops; i++) {
		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			KASUMI_LOG(ERR, "Invalid Offset");
			break;
		}

		length_in_bits = ops[i]->sym->auth.data.length;

		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->auth.data.offset >> 3);
		/* Direction from next bit after end of message */
		num_bytes = length_in_bits >> 3;

		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			dst = qp->temp_digest;
			IMB_KASUMI_F9_1_BUFFER(qp->mgr,
					&session->pKeySched_hash, src,
					num_bytes, dst);

			/* Verify digest. */
			if (memcmp(dst, ops[i]->sym->auth.digest.data,
					KASUMI_DIGEST_LENGTH) != 0)
				ops[i]->status =
					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			dst = ops[i]->sym->auth.digest.data;

			IMB_KASUMI_F9_1_BUFFER(qp->mgr,
					&session->pKeySched_hash, src,
					num_bytes, dst);
		}
		processed_ops++;
	}

	return processed_ops;
}
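
/*
 * For RTE_CRYPTO_AUTH_OP_VERIFY the digest is computed into the queue pair's
 * temp_digest scratch area and compared with the digest carried in the op;
 * for generation it is written straight to ops[i]->sym->auth.digest.data.
 */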

/** Process a batch of crypto ops that share the same session. */
static int
process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
		struct kasumi_qp *qp, uint8_t num_ops,
		uint16_t *accumulated_enqueued_ops)
{
	unsigned i;
	unsigned enqueued_ops, processed_ops;

	switch (session->op) {
	case KASUMI_OP_ONLY_CIPHER:
		processed_ops = process_kasumi_cipher_op(qp, ops,
				session, num_ops);
		break;
	case KASUMI_OP_ONLY_AUTH:
		processed_ops = process_kasumi_hash_op(qp, ops, session,
				num_ops);
		break;
	case KASUMI_OP_CIPHER_AUTH:
		processed_ops = process_kasumi_cipher_op(qp, ops, session,
				num_ops);
		process_kasumi_hash_op(qp, ops, session, processed_ops);
		break;
	case KASUMI_OP_AUTH_CIPHER:
		processed_ops = process_kasumi_hash_op(qp, ops, session,
				num_ops);
		process_kasumi_cipher_op(qp, ops, session, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(session, 0, sizeof(struct kasumi_session));
			memset(ops[i]->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
					ops[i]->sym->session));
			rte_mempool_put(qp->sess_mp_priv, session);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}

	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
			(void **)ops, processed_ops, NULL);
	qp->qp_stats.enqueued_count += enqueued_ops;
	*accumulated_enqueued_ops += enqueued_ops;

	return enqueued_ops;
}
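
/*
 * All crypto work in this PMD happens at enqueue time: completed ops are
 * pushed onto the qp->processed_ops ring here, and the dequeue handler below
 * simply drains that ring.
 */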

/** Process a crypto op with length/offset in bits. */
static int
process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
		struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
{
	unsigned enqueued_op, processed_op;

	switch (session->op) {
	case KASUMI_OP_ONLY_CIPHER:
		processed_op = process_kasumi_cipher_op_bit(qp, op,
				session);
		break;
	case KASUMI_OP_ONLY_AUTH:
		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
		break;
	case KASUMI_OP_CIPHER_AUTH:
		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
		if (processed_op == 1)
			process_kasumi_hash_op(qp, &op, session, 1);
		break;
	case KASUMI_OP_AUTH_CIPHER:
		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
		if (processed_op == 1)
			process_kasumi_cipher_op_bit(qp, op, session);
		break;
	default:
		/* Operation not supported. */
		processed_op = 0;
	}

	/*
	 * If there was no error/authentication failure,
	 * change status to successful.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Free session if a session-less crypto op. */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(op->sym->session, 0, sizeof(struct kasumi_session));
		rte_cryptodev_sym_session_free(op->sym->session);
		op->sym->session = NULL;
	}

	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
			processed_op, NULL);
	qp->qp_stats.enqueued_count += enqueued_op;
	*accumulated_enqueued_ops += enqueued_op;

	return enqueued_op;
}

static uint16_t
kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_crypto_op *c_ops[nb_ops];
	struct rte_crypto_op *curr_c_op;

	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
	struct kasumi_qp *qp = queue_pair;
	unsigned i;
	uint8_t burst_size = 0;
	uint16_t enqueued_ops = 0;
	uint8_t processed_ops;

	for (i = 0; i < nb_ops; i++) {
		curr_c_op = ops[i];

#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
				(curr_c_op->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						curr_c_op->sym->m_dst))) {
			KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf as "
				"source/destination buffer.", curr_c_op);
			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}
#endif

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = kasumi_get_session(qp, curr_c_op);
		if (unlikely(curr_sess == NULL ||
				curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level, process this buffer alone. */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((ops[i]->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			/* Process the ops of the previous session. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size, &enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op, curr_sess,
					qp, &enqueued_ops);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == KASUMI_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size, &enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess,
					qp, burst_size, &enqueued_ops);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess,
				qp, burst_size, &enqueued_ops);
	}

	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
	return enqueued_ops;
}
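
/*
 * Batching strategy: consecutive byte-aligned ops sharing a session are
 * grouped (up to KASUMI_MAX_BURST) for the multi-buffer F8 path; a bit-level
 * op first flushes any pending batch and is then processed on its own.
 */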

static uint16_t
kasumi_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **c_ops, uint16_t nb_ops)
{
	struct kasumi_qp *qp = queue_pair;

	unsigned nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
			(void **)c_ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
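
/*
 * Typical application usage (the burst size of 32 is only an example):
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, 32);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, 32);
 *
 * These land in the two handlers above through dev->enqueue_burst and
 * dev->dequeue_burst, registered in cryptodev_kasumi_create().
 */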

static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);

static int
cryptodev_kasumi_create(const char *name,
			struct rte_vdev_device *vdev,
			struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct kasumi_private *internals;
	MB_MGR *mgr;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		KASUMI_LOG(ERR, "failed to create cryptodev vdev");
		goto init_error;
	}

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_kasumi_pmd_ops;

	/* Register RX/TX burst functions for data path. */
	dev->dequeue_burst = kasumi_pmd_dequeue_burst;
	dev->enqueue_burst = kasumi_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	mgr = alloc_mb_mgr(0);
	if (mgr == NULL)
		return -ENOMEM;

	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) {
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		init_mb_mgr_avx(mgr);
	} else {
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		init_mb_mgr_sse(mgr);
	}

	internals = dev->data->dev_private;

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->mgr = mgr;

	return 0;
init_error:
	KASUMI_LOG(ERR, "driver %s: failed",
			init_params->name);

	cryptodev_kasumi_remove(vdev);
	return -EFAULT;
}
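
/*
 * The intel-ipsec-mb manager is allocated once per device and initialized
 * for the widest instruction set the CPU reports (AVX, falling back to SSE);
 * queue pairs are expected to reference it through internals->mgr.
 */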

static int
cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct kasumi_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);

	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);

	return cryptodev_kasumi_create(name, vdev, &init_params);
}

static int
cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	const char *name;
	struct kasumi_private *internals;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mgr);

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
	.probe = cryptodev_kasumi_probe,
	.remove = cryptodev_kasumi_remove
};

static struct cryptodev_driver kasumi_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
		cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
RTE_LOG_REGISTER(kasumi_logtype_driver, pmd.crypto.kasumi, NOTICE);