1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2018 Intel Corporation
5 #include <rte_common.h>
6 #include <rte_hexdump.h>
7 #include <rte_cryptodev.h>
8 #include <cryptodev_pmd.h>
9 #include <rte_bus_vdev.h>
10 #include <rte_malloc.h>
11 #include <rte_cpuflags.h>
13 #include "snow3g_pmd_private.h"
/* IV size in bytes; both SNOW 3G UEA2 (cipher) and UIA2 (auth) xforms are
 * validated against this length in snow3g_set_session_parameters(). */
15 #define SNOW3G_IV_LENGTH 16
/* Maximum number of crypto ops batched per same-session processing call. */
16 #define SNOW3G_MAX_BURST 8
/* Driver id filled in by RTE_PMD_REGISTER_CRYPTO_DRIVER at the bottom of
 * this file; used to index per-driver session private data. */
19 static uint8_t cryptodev_driver_id;
21 /** Get xform chain order. */
/**
 * Classify a symmetric xform chain as one of the snow3g_operation modes:
 * cipher-only, auth-only, cipher-then-auth or auth-then-cipher.
 * Any chain longer than two xforms, or any other combination, is rejected
 * with SNOW3G_OP_NOT_SUPPORTED.
 */
22 static enum snow3g_operation
23 snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
/* NOTE(review): lines are elided from this listing here — presumably a
 * NULL-xform guard precedes this return; confirm against full source. */
26 return SNOW3G_OP_NOT_SUPPORTED;
/* At most two chained xforms are supported. */
29 if (xform->next->next != NULL)
30 return SNOW3G_OP_NOT_SUPPORTED;
/* Auth first: auth-only, or auth followed by cipher (decrypt path). */
32 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
33 if (xform->next == NULL)
34 return SNOW3G_OP_ONLY_AUTH;
35 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
36 return SNOW3G_OP_AUTH_CIPHER;
38 return SNOW3G_OP_NOT_SUPPORTED;
/* Cipher first: cipher-only, or cipher followed by auth (encrypt path). */
41 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
42 if (xform->next == NULL)
43 return SNOW3G_OP_ONLY_CIPHER;
44 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
45 return SNOW3G_OP_CIPHER_AUTH;
47 return SNOW3G_OP_NOT_SUPPORTED;
/* Neither auth nor cipher at the head of the chain. */
50 return SNOW3G_OP_NOT_SUPPORTED;
54 /** Parse crypto xform chain and set private session parameters. */
/**
 * Validate the xform chain and populate @sess: operation mode, IV offsets,
 * auth op, and the expanded SNOW 3G key schedules (via the IPsec MB manager
 * @mgr). Returns 0 on success; error paths (elided returns in this listing)
 * reject unsupported algorithms, wrong IV/digest lengths and oversized keys.
 */
56 snow3g_set_session_parameters(MB_MGR *mgr, struct snow3g_session *sess,
57 const struct rte_crypto_sym_xform *xform)
59 const struct rte_crypto_sym_xform *auth_xform = NULL;
60 const struct rte_crypto_sym_xform *cipher_xform = NULL;
61 enum snow3g_operation mode;
63 /* Select Crypto operation - hash then cipher / cipher then hash */
64 mode = snow3g_get_mode(xform);
/* NOTE(review): switch (mode) header and per-case breaks are elided from
 * this listing. Each case picks which xform in the chain is the cipher
 * and which is the auth transform. */
67 case SNOW3G_OP_CIPHER_AUTH:
68 auth_xform = xform->next;
71 case SNOW3G_OP_ONLY_CIPHER:
74 case SNOW3G_OP_AUTH_CIPHER:
75 cipher_xform = xform->next;
77 case SNOW3G_OP_ONLY_AUTH:
80 case SNOW3G_OP_NOT_SUPPORTED:
82 SNOW3G_LOG(ERR, "Unsupported operation chain order parameter");
87 /* Only SNOW 3G UEA2 supported */
88 if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
91 if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
92 SNOW3G_LOG(ERR, "Wrong IV length");
95 if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
96 SNOW3G_LOG(ERR, "Not enough memory to store the key");
/* Cipher IV lives in the op's private area at this offset. */
100 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
/* Expand the cipher key into the per-session key schedule. */
103 IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
104 &sess->pKeySched_cipher);
108 /* Only SNOW 3G UIA2 supported */
109 if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
112 if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
113 SNOW3G_LOG(ERR, "Wrong digest length");
116 if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
117 SNOW3G_LOG(ERR, "Not enough memory to store the key");
/* GENERATE or VERIFY — checked later in process_snow3g_hash_op(). */
121 sess->auth_op = auth_xform->auth.op;
123 if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
124 SNOW3G_LOG(ERR, "Wrong IV length");
127 sess->auth_iv_offset = auth_xform->auth.iv.offset;
/* Expand the auth key into the per-session hash key schedule. */
130 IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
131 &sess->pKeySched_hash);
139 /** Get SNOW 3G session. */
/**
 * Resolve the session for @op. For WITH_SESSION ops, fetch this driver's
 * private data from the attached session. Otherwise (sessionless branch;
 * its header lines are elided here), allocate session + private data from
 * the queue pair's mempools, initialize from op->sym->xform, and attach the
 * result to the op. On failure the op status is set to INVALID_SESSION and
 * NULL is returned.
 */
140 static struct snow3g_session *
141 snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
143 struct snow3g_session *sess = NULL;
145 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
146 if (likely(op->sym->session != NULL))
147 sess = (struct snow3g_session *)
148 get_sym_session_private_data(
150 cryptodev_driver_id);
153 void *_sess_private_data = NULL;
/* Get a session header and a private-data object; the early-return
 * error paths for mempool exhaustion are elided in this listing. */
155 if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
158 if (rte_mempool_get(qp->sess_mp_priv,
159 (void **)&_sess_private_data))
162 sess = (struct snow3g_session *)_sess_private_data;
/* Parse xforms; on failure return both objects to their pools. */
164 if (unlikely(snow3g_set_session_parameters(qp->mgr, sess,
165 op->sym->xform) != 0)) {
166 rte_mempool_put(qp->sess_mp, _sess)Remove;
167 rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
170 op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
171 set_sym_session_private_data(op->sym->session,
172 cryptodev_driver_id, _sess_private_data);
175 if (unlikely(sess == NULL))
176 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
182 /** Encrypt/decrypt mbufs with same cipher key. */
/**
 * Gather src/dst/IV/length arrays for up to SNOW3G_MAX_BURST ops sharing
 * one session and run them through the multi-buffer UEA2 (F8) kernel in a
 * single call. Returns the number of ops handed to the kernel.
 * Cipher offsets/lengths in the op are expressed in bits, hence the >> 3
 * conversions to bytes below.
 */
184 process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
185 struct snow3g_session *session,
189 uint8_t processed_ops = 0;
190 const void *src[SNOW3G_MAX_BURST];
191 void *dst[SNOW3G_MAX_BURST];
192 const void *iv[SNOW3G_MAX_BURST];
193 uint32_t num_bytes[SNOW3G_MAX_BURST];
195 for (i = 0; i < num_ops; i++) {
196 src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
197 (ops[i]->sym->cipher.data.offset >> 3);
/* Out-of-place when m_dst is set; otherwise in-place on m_src. */
198 dst[i] = ops[i]->sym->m_dst ?
199 rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
200 (ops[i]->sym->cipher.data.offset >> 3) :
201 rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
202 (ops[i]->sym->cipher.data.offset >> 3);
/* IV is stored in the op's private data area at the session offset. */
203 iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
204 session->cipher_iv_offset);
205 num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
/* NOTE(review): the increment of processed_ops is elided from this
 * listing — presumably updated once per loop iteration; confirm. */
210 IMB_SNOW3G_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
211 src, dst, num_bytes, processed_ops);
213 return processed_ops;
216 /** Encrypt/decrypt mbuf (bit level function). */
/**
 * Cipher a single op whose data offset/length are not byte-aligned, using
 * the single-buffer bit-level UEA2 (F8) kernel. Requires an out-of-place
 * destination mbuf: in-place bit-level operation is rejected with
 * INVALID_ARGS (the early return after the log is elided in this listing).
 */
218 process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
219 struct rte_crypto_op *op,
220 struct snow3g_session *session)
224 uint32_t length_in_bits, offset_in_bits;
/* Offset stays in bits — the bit-level kernel consumes it directly. */
226 offset_in_bits = op->sym->cipher.data.offset;
227 src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
228 if (op->sym->m_dst == NULL) {
229 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
230 SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
233 dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
234 iv = rte_crypto_op_ctod_offset(op, uint8_t *,
235 session->cipher_iv_offset);
236 length_in_bits = op->sym->cipher.data.length;
238 IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
239 src, dst, length_in_bits, offset_in_bits);
244 /** Generate/verify hash from mbufs with same hash key. */
/**
 * Run the UIA2 (F9) digest over each op sharing @session. For VERIFY ops
 * the digest is computed into the queue pair's scratch buffer and compared
 * against the op-supplied digest (mismatch -> AUTH_FAILED); for GENERATE
 * ops it is written straight to the op's digest pointer. Ops with a
 * non-byte-aligned auth offset are marked INVALID_ARGS. Returns the number
 * of ops processed (its increment is elided from this listing).
 */
246 process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
247 struct snow3g_session *session,
251 uint8_t processed_ops = 0;
253 uint32_t length_in_bits;
256 for (i = 0; i < num_ops; i++) {
257 /* Data must be byte aligned */
258 if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
259 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
260 SNOW3G_LOG(ERR, "Offset");
/* Auth length is in bits; the F9 kernel takes it as-is. */
264 length_in_bits = ops[i]->sym->auth.data.length;
266 src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
267 (ops[i]->sym->auth.data.offset >> 3);
268 iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
269 session->auth_iv_offset);
271 if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
/* Compute into scratch space, then compare with the op's digest. */
272 dst = qp->temp_digest;
274 IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
275 &session->pKeySched_hash,
276 iv, src, length_in_bits, dst);
278 if (memcmp(dst, ops[i]->sym->auth.digest.data,
279 SNOW3G_DIGEST_LENGTH) != 0)
280 ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* GENERATE path (else branch header elided): write digest directly. */
282 dst = ops[i]->sym->auth.digest.data;
284 IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
285 &session->pKeySched_hash,
286 iv, src, length_in_bits, dst);
291 return processed_ops;
294 /** Process a batch of crypto ops which shares the same session. */
/**
 * Dispatch a same-session batch to the cipher and/or hash routines per
 * session->op, finalize op statuses, release sessionless session objects,
 * and enqueue completed ops on the qp's processed ring. Returns (in elided
 * lines) the number of enqueued ops via the switch result; statistics and
 * *accumulated_enqueued_ops are updated before returning.
 */
296 process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
297 struct snow3g_qp *qp, uint8_t num_ops,
298 uint16_t *accumulated_enqueued_ops)
301 unsigned enqueued_ops, processed_ops;
/* Debug-only sanity check: this PMD cannot handle segmented mbufs. */
303 #ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
304 for (i = 0; i < num_ops; i++) {
305 if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
306 (ops[i]->sym->m_dst != NULL &&
307 !rte_pktmbuf_is_contiguous(
308 ops[i]->sym->m_dst))) {
309 SNOW3G_LOG(ERR, "PMD supports only contiguous mbufs, "
310 "op (%p) provides noncontiguous mbuf as "
311 "source/destination buffer.\n", ops[i]);
312 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
/* Per-mode dispatch; breaks between cases are elided in this listing.
 * For chained modes the second stage only sees ops the first stage
 * actually processed. */
318 switch (session->op) {
319 case SNOW3G_OP_ONLY_CIPHER:
320 processed_ops = process_snow3g_cipher_op(qp, ops,
323 case SNOW3G_OP_ONLY_AUTH:
324 processed_ops = process_snow3g_hash_op(qp, ops, session,
327 case SNOW3G_OP_CIPHER_AUTH:
328 processed_ops = process_snow3g_cipher_op(qp, ops, session,
330 process_snow3g_hash_op(qp, ops, session, processed_ops);
332 case SNOW3G_OP_AUTH_CIPHER:
333 processed_ops = process_snow3g_hash_op(qp, ops, session,
335 process_snow3g_cipher_op(qp, ops, session, processed_ops);
338 /* Operation not supported. */
342 for (i = 0; i < num_ops; i++) {
344 * If there was no error/authentication failure,
345 * change status to successful.
347 if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
348 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
349 /* Free session if a session-less crypto op. */
350 if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
/* Wipe private data (holds key schedules) before returning the
 * objects to their mempools. */
351 memset(session, 0, sizeof(struct snow3g_session));
352 memset(ops[i]->sym->session, 0,
353 rte_cryptodev_sym_get_existing_header_session_size(
354 ops[i]->sym->session));
355 rte_mempool_put(qp->sess_mp_priv, session);
356 rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
357 ops[i]->sym->session = NULL;
361 enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
362 (void **)ops, processed_ops, NULL);
363 qp->qp_stats.enqueued_count += enqueued_ops;
364 *accumulated_enqueued_ops += enqueued_ops;
369 /** Process a crypto op with length/offset in bits. */
/**
 * Single-op variant of process_ops() for ops whose cipher length/offset is
 * not byte-aligned: uses the bit-level cipher routine, chains the hash
 * stage only when the first stage succeeded, then finalizes status, frees
 * a sessionless session, and enqueues the op on the processed ring.
 */
371 process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
372 struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
374 unsigned enqueued_op, processed_op;
/* Breaks between cases are elided in this listing. */
376 switch (session->op) {
377 case SNOW3G_OP_ONLY_CIPHER:
378 processed_op = process_snow3g_cipher_op_bit(qp, op,
381 case SNOW3G_OP_ONLY_AUTH:
382 processed_op = process_snow3g_hash_op(qp, &op, session, 1);
384 case SNOW3G_OP_CIPHER_AUTH:
385 processed_op = process_snow3g_cipher_op_bit(qp, op, session);
386 if (processed_op == 1)
387 process_snow3g_hash_op(qp, &op, session, 1);
389 case SNOW3G_OP_AUTH_CIPHER:
390 processed_op = process_snow3g_hash_op(qp, &op, session, 1);
391 if (processed_op == 1)
392 process_snow3g_cipher_op_bit(qp, op, session);
395 /* Operation not supported. */
400 * If there was no error/authentication failure,
401 * change status to successful.
403 if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
404 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
406 /* Free session if a session-less crypto op. */
407 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
/* NOTE(review): this frees via rte_cryptodev_sym_session_free(),
 * unlike process_ops() which returns objects to the qp mempools —
 * confirm this asymmetry against the full source. */
408 memset(op->sym->session, 0, sizeof(struct snow3g_session));
409 rte_cryptodev_sym_session_free(op->sym->session);
410 op->sym->session = NULL;
413 enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
414 (void **)&op, processed_op, NULL);
415 qp->qp_stats.enqueued_count += enqueued_op;
416 *accumulated_enqueued_ops += enqueued_op;
/**
 * Enqueue burst entry point (dev->enqueue_burst).
 * Walks the ops, resolving a session per op, and batches consecutive ops
 * that share a session (up to SNOW3G_MAX_BURST) before handing the batch
 * to process_ops(). Ops with bit-level cipher length/offset flush the
 * current batch and are processed individually via process_op_bit().
 * Returns (in an elided line) the number of ops accepted; the shortfall
 * is counted in enqueue_err_count.
 */
422 snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
425 struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
426 struct rte_crypto_op *curr_c_op;
428 struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
429 struct snow3g_qp *qp = queue_pair;
431 uint8_t burst_size = 0;
432 uint16_t enqueued_ops = 0;
433 uint8_t processed_ops;
435 for (i = 0; i < nb_ops; i++) {
438 /* Set status as enqueued (not processed yet) by default. */
439 curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
441 curr_sess = snow3g_get_session(qp, curr_c_op);
442 if (unlikely(curr_sess == NULL ||
443 curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
445 RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
449 /* If length/offset is at bit-level, process this buffer alone. */
450 if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
451 || ((curr_c_op->sym->cipher.data.offset
453 /* Process the ops of the previous session. */
454 if (prev_sess != NULL) {
455 processed_ops = process_ops(c_ops, prev_sess,
456 qp, burst_size, &enqueued_ops);
/* Shortfall ends the burst early (elided break/goto). */
457 if (processed_ops < burst_size) {
466 processed_ops = process_op_bit(curr_c_op, curr_sess,
468 if (processed_ops != 1)
474 /* Batch ops that share the same session. */
475 if (prev_sess == NULL) {
476 prev_sess = curr_sess;
477 c_ops[burst_size++] = curr_c_op;
478 } else if (curr_sess == prev_sess) {
479 c_ops[burst_size++] = curr_c_op;
481 * When there are enough ops to process in a batch,
482 * process them, and start a new batch.
484 if (burst_size == SNOW3G_MAX_BURST) {
485 processed_ops = process_ops(c_ops, prev_sess,
486 qp, burst_size, &enqueued_ops);
487 if (processed_ops < burst_size) {
497 * Different session, process the ops
498 * of the previous session.
500 processed_ops = process_ops(c_ops, prev_sess,
501 qp, burst_size, &enqueued_ops);
502 if (processed_ops < burst_size) {
508 prev_sess = curr_sess;
510 c_ops[burst_size++] = curr_c_op;
514 if (burst_size != 0) {
515 /* Process the crypto ops of the last session. */
516 processed_ops = process_ops(c_ops, prev_sess,
517 qp, burst_size, &enqueued_ops);
/* Ops not enqueued on the processed ring count as enqueue errors. */
520 qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
/**
 * Dequeue burst entry point (dev->dequeue_burst): drain up to @nb_ops
 * completed ops from the qp's processed ring into @c_ops, update stats,
 * and return the count (return line elided in this listing).
 */
525 snow3g_pmd_dequeue_burst(void *queue_pair,
526 struct rte_crypto_op **c_ops, uint16_t nb_ops)
528 struct snow3g_qp *qp = queue_pair;
530 unsigned nb_dequeued;
532 nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
533 (void **)c_ops, nb_ops, NULL);
534 qp->qp_stats.dequeued_count += nb_dequeued;
/* Forward declaration: cryptodev_snow3g_create() calls remove() on its
 * error path before the definition appears below. */
539 static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
/**
 * Create and initialize the SNOW 3G vdev crypto device: allocate the
 * cryptodev, wire ops and burst functions, set feature flags, allocate an
 * IPsec MB manager sized to the best CPU ISA available (AVX2 > AVX > SSE),
 * and stash it in the device private data. On error (label elided) logs
 * and tears down via cryptodev_snow3g_remove().
 */
542 cryptodev_snow3g_create(const char *name,
543 struct rte_vdev_device *vdev,
544 struct rte_cryptodev_pmd_init_params *init_params)
546 struct rte_cryptodev *dev;
547 struct snow3g_private *internals;
/* NOTE(review): the MB_MGR *mgr declaration (and the NULL-check after
 * rte_cryptodev_pmd_create) are elided from this listing. */
550 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
552 SNOW3G_LOG(ERR, "failed to create cryptodev vdev");
556 dev->driver_id = cryptodev_driver_id;
557 dev->dev_ops = rte_snow3g_pmd_ops;
559 /* Register RX/TX burst functions for data path. */
560 dev->dequeue_burst = snow3g_pmd_dequeue_burst;
561 dev->enqueue_burst = snow3g_pmd_enqueue_burst;
563 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
564 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
565 RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
566 RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
567 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
569 mgr = alloc_mb_mgr(0);
/* Pick the fastest multi-buffer code path the CPU supports. */
573 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
574 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
575 init_mb_mgr_avx2(mgr);
576 } else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) {
577 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
578 init_mb_mgr_avx(mgr);
/* SSE fallback (else branch header elided). */
580 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
581 init_mb_mgr_sse(mgr);
584 internals = dev->data->dev_private;
585 internals->mgr = mgr;
587 internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
591 SNOW3G_LOG(ERR, "driver %s: cryptodev_snow3g_create failed",
594 cryptodev_snow3g_remove(vdev);
/**
 * vdev probe callback: build default init params (private data sized for
 * snow3g_private), parse any device arguments, and create the device.
 * The NULL-name early return is elided from this listing.
 */
599 cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
601 struct rte_cryptodev_pmd_init_params init_params = {
603 sizeof(struct snow3g_private),
605 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
608 const char *input_args;
610 name = rte_vdev_device_name(vdev);
613 input_args = rte_vdev_device_args(vdev);
/* Override defaults (socket id, max qps, ...) from devargs. */
615 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
617 return cryptodev_snow3g_create(name, vdev, &init_params);
/**
 * vdev remove callback: look up the named cryptodev, free the IPsec MB
 * manager held in its private data, and destroy the device.
 * Returns -ENODEV-style errors on lookup failure (elided returns).
 */
621 cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
623 struct rte_cryptodev *cryptodev;
625 struct snow3g_private *internals;
627 name = rte_vdev_device_name(vdev);
631 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
632 if (cryptodev == NULL)
635 internals = cryptodev->data->dev_private;
/* Release the multi-buffer manager allocated in create(). */
637 free_mb_mgr(internals->mgr);
639 return rte_cryptodev_pmd_destroy(cryptodev);
/* Virtual device driver hooks for the SNOW 3G PMD. */
642 static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
643 .probe = cryptodev_snow3g_probe,
644 .remove = cryptodev_snow3g_remove
/* Registration record that receives cryptodev_driver_id below. */
647 static struct cryptodev_driver snow3g_crypto_drv;
649 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
650 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
/* Documents accepted devargs (socket_id line elided in this listing). */
651 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
652 "max_nb_queue_pairs=<int> "
/* Assigns cryptodev_driver_id used to key session private data. */
654 RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
655 cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id);
656 RTE_LOG_REGISTER_DEFAULT(snow3g_logtype_driver, INFO);