/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_snow3g_pmd_private.h"

#define SNOW3G_IV_LENGTH 16
#define SNOW3G_MAX_BURST 8
#define BYTE_LEN 8

static uint8_t cryptodev_driver_id;

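/*
 * cryptodev_driver_id above is assigned by the RTE_PMD_REGISTER_CRYPTO_DRIVER()
 * macro at the bottom of this file and is used when storing and looking up
 * this PMD's per-session private data.
 */
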
/** Get xform chain order. */
static enum snow3g_operation
snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return SNOW3G_OP_NOT_SUPPORTED;

	/* Chains of more than two xforms are not supported. */
	if (xform->next != NULL && xform->next->next != NULL)
		return SNOW3G_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return SNOW3G_OP_ONLY_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return SNOW3G_OP_AUTH_CIPHER;
		else
			return SNOW3G_OP_NOT_SUPPORTED;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return SNOW3G_OP_ONLY_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return SNOW3G_OP_CIPHER_AUTH;
		else
			return SNOW3G_OP_NOT_SUPPORTED;
	}

	return SNOW3G_OP_NOT_SUPPORTED;
}

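/*
 * Illustrative example: a cipher-then-auth chain is built by linking two
 * xforms through their "next" pointers,
 *     cipher_xform.next = &auth_xform;  auth_xform.next = NULL;
 * which snow3g_get_mode() classifies as SNOW3G_OP_CIPHER_AUTH.
 */
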
/** Parse crypto xform chain and set private session parameters. */
int
snow3g_set_session_parameters(struct snow3g_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum snow3g_operation mode;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	mode = snow3g_get_mode(xform);
	switch (mode) {
	case SNOW3G_OP_CIPHER_AUTH:
		auth_xform = xform->next;
		/* Fall-through */
	case SNOW3G_OP_ONLY_CIPHER:
		cipher_xform = xform;
		break;
	case SNOW3G_OP_AUTH_CIPHER:
		cipher_xform = xform->next;
		/* Fall-through */
	case SNOW3G_OP_ONLY_AUTH:
		auth_xform = xform;
		break;
	case SNOW3G_OP_NOT_SUPPORTED:
	default:
		SNOW3G_LOG_ERR("Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

	if (cipher_xform) {
		/* Only SNOW 3G UEA2 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
			return -ENOTSUP;
		if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
			SNOW3G_LOG_ERR("Wrong IV length");
			return -EINVAL;
		}
		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
		/* Initialize the cipher key schedule. */
		sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
				&sess->pKeySched_cipher);
	}

	if (auth_xform) {
		/* Only SNOW 3G UIA2 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
			return -ENOTSUP;
		if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
			SNOW3G_LOG_ERR("Wrong digest length");
			return -EINVAL;
		}
		sess->auth_op = auth_xform->auth.op;
		if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
			SNOW3G_LOG_ERR("Wrong IV length");
			return -EINVAL;
		}
		sess->auth_iv_offset = auth_xform->auth.iv.offset;
		/* Initialize the authentication key schedule. */
		sso_snow3g_init_key_sched(auth_xform->auth.key.data,
				&sess->pKeySched_hash);
	}

	sess->op = mode;
	return 0;
}

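/*
 * Both SNOW 3G UEA2 (cipher) and UIA2 (authentication) use 128-bit keys and
 * 128-bit IVs, hence SNOW3G_IV_LENGTH of 16 bytes; UIA2 produces a 32-bit
 * MAC, matching SNOW3G_DIGEST_LENGTH of 4 bytes.
 */
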
/** Get SNOW 3G session. */
static struct snow3g_session *
snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
{
	struct snow3g_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct snow3g_session *)
					get_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
	} else {
		void *_sess = NULL;
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;
		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
			return NULL;

		sess = (struct snow3g_session *)_sess_private_data;
		if (unlikely(snow3g_set_session_parameters(sess,
				op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_session_private_data(op->sym->session, cryptodev_driver_id,
				_sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}

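/*
 * For session-less ops two objects are taken from the queue pair's session
 * mempool: one backs the generic rte_cryptodev_sym_session header, the other
 * holds this PMD's private snow3g_session data. Both are released again once
 * the op has been processed.
 */
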
/** Encrypt/decrypt mbufs with same cipher key. */
static uint8_t
process_snow3g_cipher_op(struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	unsigned i;
	uint8_t processed_ops = 0;
	uint8_t *src[SNOW3G_MAX_BURST], *dst[SNOW3G_MAX_BURST];
	uint8_t *iv[SNOW3G_MAX_BURST];
	uint32_t num_bytes[SNOW3G_MAX_BURST];

	for (i = 0; i < num_ops; i++) {
		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		dst[i] = ops[i]->sym->m_dst ?
			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3) :
			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->cipher_iv_offset);
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

		processed_ops++;
	}

	sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, iv, src, dst,
			num_bytes, processed_ops);

	return processed_ops;
}

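/*
 * The cipher offsets/lengths in rte_crypto_sym_op are expressed in bits for
 * SNOW 3G, so the ">> 3" shifts above convert them to bytes before the
 * gathered src/dst/IV arrays are handed to the multi-buffer f8 (UEA2) routine.
 */
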
/** Encrypt/decrypt mbuf (bit level function). */
static uint8_t
process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
		struct snow3g_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv;
	uint32_t length_in_bits, offset_in_bits;

	offset_in_bits = op->sym->cipher.data.offset;
	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	if (op->sym->m_dst == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		SNOW3G_LOG_ERR("bit-level in-place not supported\n");
		return 0;
	}
	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->cipher_iv_offset);
	length_in_bits = op->sym->cipher.data.length;

	sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
			src, dst, length_in_bits, offset_in_bits);

	return 1;
}

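/*
 * Bit-level cipher ops require an out-of-place destination mbuf (m_dst);
 * in-place operation is rejected above with RTE_CRYPTO_OP_STATUS_INVALID_ARGS.
 */
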
/** Generate/verify hash from mbufs with same hash key. */
static uint8_t
process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	unsigned i;
	uint8_t processed_ops = 0;
	uint8_t *src, *dst;
	uint8_t *iv;
	uint32_t length_in_bits;

	for (i = 0; i < num_ops; i++) {
		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			SNOW3G_LOG_ERR("Offset");
			break;
		}

		length_in_bits = ops[i]->sym->auth.data.length;
		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->auth.data.offset >> 3);
		iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->auth_iv_offset);

		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			dst = qp->temp_digest;

			sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
					iv, src,
					length_in_bits, dst);
			/* Verify digest. */
			if (memcmp(dst, ops[i]->sym->auth.digest.data,
					SNOW3G_DIGEST_LENGTH) != 0)
				ops[i]->status =
					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			dst = ops[i]->sym->auth.digest.data;

			sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
					iv, src,
					length_in_bits, dst);
		}
		processed_ops++;
	}

	return processed_ops;
}

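/*
 * On verification the f9 (UIA2) MAC is computed into the queue pair's
 * temp_digest scratch buffer and compared against the digest supplied with
 * the op, so application-provided digest memory is never written; on
 * generation the MAC is written directly to ops[i]->sym->auth.digest.data.
 */
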
/** Process a batch of crypto ops which shares the same session. */
static int
process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
		struct snow3g_qp *qp, uint8_t num_ops,
		uint16_t *accumulated_enqueued_ops)
{
	unsigned i;
	unsigned enqueued_ops, processed_ops;

#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
	for (i = 0; i < num_ops; i++) {
		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
				(ops[i]->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						ops[i]->sym->m_dst))) {
			SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf as "
				"source/destination buffer.\n", ops[i]);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	}
#endif

	switch (session->op) {
	case SNOW3G_OP_ONLY_CIPHER:
		processed_ops = process_snow3g_cipher_op(ops,
				session, num_ops);
		break;
	case SNOW3G_OP_ONLY_AUTH:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		break;
	case SNOW3G_OP_CIPHER_AUTH:
		processed_ops = process_snow3g_cipher_op(ops, session,
				num_ops);
		process_snow3g_hash_op(qp, ops, session, processed_ops);
		break;
	case SNOW3G_OP_AUTH_CIPHER:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		process_snow3g_cipher_op(ops, session, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(session, 0, sizeof(struct snow3g_session));
			memset(ops[i]->sym->session, 0,
				rte_cryptodev_get_header_session_size());
			rte_mempool_put(qp->sess_mp, session);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}

	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
			(void **)ops, processed_ops, NULL);
	qp->qp_stats.enqueued_count += enqueued_ops;
	*accumulated_enqueued_ops += enqueued_ops;

	return enqueued_ops;
}

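/*
 * Processed ops are staged on the queue pair's "processed_ops" ring here at
 * enqueue time; snow3g_pmd_dequeue_burst() simply drains that ring, so the
 * actual SNOW 3G work happens synchronously in the enqueue path.
 */
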
/** Process a crypto op with length/offset in bits. */
static int
process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
		struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
{
	unsigned enqueued_op, processed_op;

	switch (session->op) {
	case SNOW3G_OP_ONLY_CIPHER:
		processed_op = process_snow3g_cipher_op_bit(op,
				session);
		break;
	case SNOW3G_OP_ONLY_AUTH:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case SNOW3G_OP_CIPHER_AUTH:
		processed_op = process_snow3g_cipher_op_bit(op, session);
		if (processed_op == 1)
			process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case SNOW3G_OP_AUTH_CIPHER:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		if (processed_op == 1)
			process_snow3g_cipher_op_bit(op, session);
		break;
	default:
		/* Operation not supported. */
		processed_op = 0;
	}

	/*
	 * If there was no error/authentication failure,
	 * change status to successful.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Free session if a session-less crypto op. */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(op->sym->session, 0, sizeof(struct snow3g_session));
		rte_cryptodev_sym_session_free(op->sym->session);
		op->sym->session = NULL;
	}

	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
			(void **)&op, processed_op, NULL);
	qp->qp_stats.enqueued_count += enqueued_op;
	*accumulated_enqueued_ops += enqueued_op;

	return enqueued_op;
}

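/*
 * Unlike process_ops(), which handles a batch sharing one session, this path
 * handles exactly one op whose cipher length/offset is not byte-aligned and
 * therefore cannot go through the multi-buffer f8 routine.
 */
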
static uint16_t
snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
	struct rte_crypto_op *curr_c_op;
	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
	struct snow3g_qp *qp = queue_pair;
	unsigned i;
	uint8_t burst_size = 0;
	uint16_t enqueued_ops = 0;
	uint8_t processed_ops;

	for (i = 0; i < nb_ops; i++) {
		curr_c_op = ops[i];

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = snow3g_get_session(qp, curr_c_op);
		if (unlikely(curr_sess == NULL ||
				curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level, process this buffer alone. */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((curr_c_op->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			/* Process the ops of the previous session. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size, &enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}
				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op, curr_sess,
					qp, &enqueued_ops);
			if (processed_ops != 1)
				break;
			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == SNOW3G_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size, &enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}
				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess,
					qp, burst_size, &enqueued_ops);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}
			burst_size = 0;
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess,
				qp, burst_size, &enqueued_ops);
	}

	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
	return enqueued_ops;
}

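/*
 * Ops are batched per session up to SNOW3G_MAX_BURST so the multi-buffer f8
 * routine can process them together; any op whose cipher length or offset is
 * not byte-aligned flushes the current batch and is handled on its own by
 * process_op_bit().
 */
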
static uint16_t
snow3g_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **c_ops, uint16_t nb_ops)
{
	struct snow3g_qp *qp = queue_pair;
	unsigned nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
			(void **)c_ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);

static int
cryptodev_snow3g_create(const char *name,
			struct rte_vdev_device *vdev,
			struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct snow3g_private *internals;
	uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		SNOW3G_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_snow3g_pmd_ops;

	/* Register RX/TX burst functions for data path. */
	dev->dequeue_burst = snow3g_pmd_dequeue_burst;
	dev->enqueue_burst = snow3g_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			cpu_flags;

	internals = dev->data->dev_private;
	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;
init_error:
	SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed",
			init_params->name);
	cryptodev_snow3g_remove(vdev);
	return -EFAULT;
}

static int
cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct snow3g_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
	return cryptodev_snow3g_create(name, vdev, &init_params);
}

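/*
 * Illustrative example of instantiating this PMD from the EAL command line,
 * using the parameters parsed above (device name and values are examples):
 *     --vdev "crypto_snow3g,max_nb_queue_pairs=2,max_nb_sessions=128,socket_id=0"
 */
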
static int
cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	const char *name = rte_vdev_device_name(vdev);

	if (name == NULL)
		return -EINVAL;
	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;
	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
	.probe = cryptodev_snow3g_probe,
	.remove = cryptodev_snow3g_remove
};

static struct cryptodev_driver snow3g_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, cryptodev_snow3g_pmd_drv,
		cryptodev_driver_id);