/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "zuc_pmd_private.h"

#define ZUC_MAX_BURST 16
#define BYTE_LEN 8

int zuc_logtype_driver;
static uint8_t cryptodev_driver_id;

/** Get xform chain order. */
static enum zuc_operation
zuc_get_mode(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return ZUC_OP_NOT_SUPPORTED;

	if (xform->next)
		if (xform->next->next != NULL)
			return ZUC_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return ZUC_OP_ONLY_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return ZUC_OP_AUTH_CIPHER;
		else
			return ZUC_OP_NOT_SUPPORTED;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return ZUC_OP_ONLY_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return ZUC_OP_CIPHER_AUTH;
		else
			return ZUC_OP_NOT_SUPPORTED;
	}

	return ZUC_OP_NOT_SUPPORTED;
}

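/*
 * Illustrative example (not part of the driver): a cipher-then-auth chain
 * is expressed as two linked xforms, which zuc_get_mode() classifies as
 * ZUC_OP_CIPHER_AUTH:
 *
 *	struct rte_crypto_sym_xform auth_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth.algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
 *	};
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xform,
 *		.cipher.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
 *	};
 */
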
/** Parse crypto xform chain and set private session parameters. */
int
zuc_set_session_parameters(struct zuc_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum zuc_operation mode;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	mode = zuc_get_mode(xform);

	switch (mode) {
	case ZUC_OP_CIPHER_AUTH:
		auth_xform = xform->next;

		/* Fall-through */
	case ZUC_OP_ONLY_CIPHER:
		cipher_xform = xform;
		break;
	case ZUC_OP_AUTH_CIPHER:
		cipher_xform = xform->next;

		/* Fall-through */
	case ZUC_OP_ONLY_AUTH:
		auth_xform = xform;
		break;
	case ZUC_OP_NOT_SUPPORTED:
	default:
		ZUC_LOG(ERR, "Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

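	/*
	 * Note: EEA3 (cipher) and EIA3 (auth) each take a 128-bit key and a
	 * 128-bit IV, so the single ZUC_IV_KEY_LENGTH constant (16 bytes)
	 * is used for both checks below.
	 */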
	if (cipher_xform) {
		/* Only ZUC EEA3 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
			return -ENOTSUP;

		if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
			ZUC_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;

		/* Copy the key */
		memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
				ZUC_IV_KEY_LENGTH);
	}

	if (auth_xform) {
		/* Only ZUC EIA3 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
			return -ENOTSUP;

		if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
			ZUC_LOG(ERR, "Wrong digest length");
			return -EINVAL;
		}

		sess->auth_op = auth_xform->auth.op;

		if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
			ZUC_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		sess->auth_iv_offset = auth_xform->auth.iv.offset;

		/* Copy the key */
		memcpy(sess->pKey_hash, auth_xform->auth.key.data,
				ZUC_IV_KEY_LENGTH);
	}

	sess->op = mode;

	return 0;
}

/** Get ZUC session. */
static struct zuc_session *
zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
{
	struct zuc_session *sess = NULL;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session != NULL))
			sess = (struct zuc_session *)
					get_sym_session_private_data(
					op->sym->session,
					cryptodev_driver_id);
	} else {
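		/*
		 * Session-less op: take a session header from qp->sess_mp
		 * and driver-private storage from qp->sess_mp_priv, then
		 * build the session from the op's xform chain.
		 */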
		void *_sess = NULL;
		void *_sess_private_data = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))
			return NULL;

		sess = (struct zuc_session *)_sess_private_data;

		if (unlikely(zuc_set_session_parameters(sess,
				op->sym->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}
		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(op->sym->session,
				cryptodev_driver_id, _sess_private_data);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}

/** Encrypt/decrypt mbufs. */
static uint8_t
process_zuc_cipher_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
		struct zuc_session **sessions,
		uint8_t num_ops)
{
	unsigned int i;
	uint8_t processed_ops = 0;
	const void *src[ZUC_MAX_BURST];
	void *dst[ZUC_MAX_BURST];
	const void *iv[ZUC_MAX_BURST];
	uint32_t num_bytes[ZUC_MAX_BURST];
	const void *cipher_keys[ZUC_MAX_BURST];
	struct zuc_session *sess;

	for (i = 0; i < num_ops; i++) {
		if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((ops[i]->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			ZUC_LOG(ERR, "Data Length or offset");
			break;
		}

		sess = sessions[i];

#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
				(ops[i]->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						ops[i]->sym->m_dst))) {
			ZUC_LOG(ERR, "PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf as "
				"source/destination buffer.", ops[i]);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			break;
		}
#endif

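		/*
		 * cipher.data.offset/length are expressed in bits; ">> 3"
		 * converts them to byte positions for the buffer pointers
		 * handed to the multi-buffer library.
		 */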
		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		dst[i] = ops[i]->sym->m_dst ?
			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3) :
			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				sess->cipher_iv_offset);
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

		cipher_keys[i] = sess->pKey_cipher;

		processed_ops++;
	}

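	/*
	 * All valid ops gathered above are ciphered in one multi-buffer
	 * call; intel-ipsec-mb processes the buffers in parallel across
	 * SIMD lanes.
	 */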
	IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
			(const void **)iv, (const void **)src, (void **)dst,
			num_bytes, processed_ops);

	return processed_ops;
}

/** Generate/verify hash from mbufs. */
static int
process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
		struct zuc_session **sessions,
		uint8_t num_ops)
{
	unsigned int i;
	uint8_t processed_ops = 0;
	uint8_t *src[ZUC_MAX_BURST];
	uint32_t *dst[ZUC_MAX_BURST];
	uint32_t length_in_bits[ZUC_MAX_BURST];
	uint8_t *iv[ZUC_MAX_BURST];
	const void *hash_keys[ZUC_MAX_BURST];
	struct zuc_session *sess;

	for (i = 0; i < num_ops; i++) {
		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			ZUC_LOG(ERR, "Offset");
			break;
		}

		sess = sessions[i];

		length_in_bits[i] = ops[i]->sym->auth.data.length;

		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->auth.data.offset >> 3);
		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				sess->auth_iv_offset);

		hash_keys[i] = sess->pKey_hash;
		if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
			dst[i] = (uint32_t *)qp->temp_digest;
		else
			dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;

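		/*
		 * For verification the tag is computed into a scratch
		 * buffer (qp->temp_digest) so the digest attached to the
		 * op stays intact for the memcmp() below.
		 */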
#if IMB_VERSION_NUM < IMB_VERSION(0, 53, 3)
		IMB_ZUC_EIA3_1_BUFFER(qp->mb_mgr, hash_keys[i],
				iv[i], src[i], length_in_bits[i], dst[i]);
#endif
		processed_ops++;
	}

#if IMB_VERSION_NUM >= IMB_VERSION(0, 53, 3)
	IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
			(const void * const *)iv, (const void * const *)src,
			length_in_bits, dst, processed_ops);
#endif

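	/*
	 * intel-ipsec-mb versions older than 0.53.3 lack the EIA3 N-buffer
	 * entry point, hence the per-buffer fallback inside the loop above.
	 */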
	/*
	 * If the tag needs to be verified, compare the generated tag
	 * with the attached tag.
	 */
	for (i = 0; i < processed_ops; i++)
		if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
			if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
					ZUC_DIGEST_LENGTH) != 0)
				ops[i]->status =
					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	return processed_ops;
}

/** Process a batch of crypto ops which share the same operation type. */
static int
process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
		struct zuc_session **sessions,
		struct zuc_qp *qp, uint8_t num_ops,
		uint16_t *accumulated_enqueued_ops)
{
	unsigned int i;
	unsigned int enqueued_ops, processed_ops;

	switch (op_type) {
	case ZUC_OP_ONLY_CIPHER:
		processed_ops = process_zuc_cipher_op(qp, ops,
				sessions, num_ops);
		break;
	case ZUC_OP_ONLY_AUTH:
		processed_ops = process_zuc_hash_op(qp, ops, sessions,
				num_ops);
		break;
	case ZUC_OP_CIPHER_AUTH:
		processed_ops = process_zuc_cipher_op(qp, ops, sessions,
				num_ops);
		process_zuc_hash_op(qp, ops, sessions, processed_ops);
		break;
	case ZUC_OP_AUTH_CIPHER:
		processed_ops = process_zuc_hash_op(qp, ops, sessions,
				num_ops);
		process_zuc_cipher_op(qp, ops, sessions, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

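	/*
	 * In the chained cases above, the second stage runs only on the
	 * ops the first stage actually processed.
	 */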
	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(sessions[i], 0, sizeof(struct zuc_session));
			memset(ops[i]->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
					ops[i]->sym->session));
			rte_mempool_put(qp->sess_mp_priv, sessions[i]);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}

	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
			(void **)ops, processed_ops, NULL);
	qp->qp_stats.enqueued_count += enqueued_ops;
	*accumulated_enqueued_ops += enqueued_ops;

	return enqueued_ops;
}

static uint16_t
zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
	struct rte_crypto_op *curr_c_op;

	struct zuc_session *curr_sess;
	struct zuc_session *sessions[ZUC_MAX_BURST];
	enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
	enum zuc_operation curr_zuc_op;
	struct zuc_qp *qp = queue_pair;
	unsigned int i;
	uint8_t burst_size = 0;
	uint16_t enqueued_ops = 0;
	uint8_t processed_ops;

	for (i = 0; i < nb_ops; i++) {
		curr_c_op = ops[i];

		curr_sess = zuc_get_session(qp, curr_c_op);
		if (unlikely(curr_sess == NULL)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		curr_zuc_op = curr_sess->op;

		/*
		 * Batch ops that share the same operation type
		 * (cipher only, auth only...).
		 */
		if (burst_size == 0) {
			prev_zuc_op = curr_zuc_op;
			c_ops[0] = curr_c_op;
			sessions[0] = curr_sess;
			burst_size++;
		} else if (curr_zuc_op == prev_zuc_op) {
			c_ops[burst_size] = curr_c_op;
			sessions[burst_size] = curr_sess;
			burst_size++;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == ZUC_MAX_BURST) {
				processed_ops = process_ops(c_ops, curr_zuc_op,
						sessions, qp, burst_size,
						&enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
			}
		} else {
			/*
			 * Different operation type, process the ops
			 * of the previous type.
			 */
			processed_ops = process_ops(c_ops, prev_zuc_op,
					sessions, qp, burst_size,
					&enqueued_ops);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_zuc_op = curr_zuc_op;

			c_ops[0] = curr_c_op;
			sessions[0] = curr_sess;
			burst_size++;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last operation type. */
		processed_ops = process_ops(c_ops, prev_zuc_op,
				sessions, qp, burst_size,
				&enqueued_ops);
	}

	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
	return enqueued_ops;
}

static uint16_t
zuc_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **c_ops, uint16_t nb_ops)
{
	struct zuc_qp *qp = queue_pair;

	unsigned int nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
			(void **)c_ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

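/*
 * Illustrative call flow (application side, not part of this file):
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	...
 *	uint16_t m = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 *
 * Both calls dispatch to zuc_pmd_enqueue_burst()/zuc_pmd_dequeue_burst(),
 * registered below in cryptodev_zuc_create().
 */
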
static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);

static int
cryptodev_zuc_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct zuc_private *internals;
	MB_MGR *mb_mgr;

	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
	if (dev == NULL) {
		ZUC_LOG(ERR, "failed to create cryptodev vdev");
		goto init_error;
	}

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

	mb_mgr = alloc_mb_mgr(0);
	if (mb_mgr == NULL)
		return -ENOMEM;

	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
		init_mb_mgr_avx512(mb_mgr);
	} else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
		init_mb_mgr_avx2(mb_mgr);
	} else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) {
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
		init_mb_mgr_avx(mb_mgr);
	} else {
		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
		init_mb_mgr_sse(mb_mgr);
	}

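	/*
	 * The multi-buffer manager is initialized once per device with the
	 * widest SIMD path the CPU supports; the per-op IMB_* calls above
	 * then run through the function pointers installed here.
	 */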
	dev->driver_id = cryptodev_driver_id;
	dev->dev_ops = rte_zuc_pmd_ops;

	/* Register RX/TX burst functions for data path. */
	dev->dequeue_burst = zuc_pmd_dequeue_burst;
	dev->enqueue_burst = zuc_pmd_enqueue_burst;

	internals = dev->data->dev_private;
	internals->mb_mgr = mb_mgr;

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;

	return 0;
init_error:
	ZUC_LOG(ERR, "driver %s: failed",
			init_params->name);

	cryptodev_zuc_remove(vdev);
	return -EFAULT;
}

static int
cryptodev_zuc_probe(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"",
		sizeof(struct zuc_private),
		rte_socket_id(),
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);

	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);

	return cryptodev_zuc_create(name, vdev, &init_params);
}

static int
cryptodev_zuc_remove(struct rte_vdev_device *vdev)
{
	struct rte_cryptodev *cryptodev;
	const char *name;
	struct zuc_private *internals;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
	if (cryptodev == NULL)
		return -ENODEV;

	internals = cryptodev->data->dev_private;

	free_mb_mgr(internals->mb_mgr);

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
	.probe = cryptodev_zuc_probe,
	.remove = cryptodev_zuc_remove
};

static struct cryptodev_driver zuc_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
	"max_nb_queue_pairs=<int> "
	"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
		cryptodev_driver_id);

RTE_INIT(zuc_init_log)
{
	zuc_logtype_driver = rte_log_register("pmd.crypto.zuc");
}
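
/*
 * Illustrative usage (application side, not part of this file): the vdev
 * can be created at runtime, e.g.
 *
 *	rte_vdev_init("crypto_zuc", "max_nb_queue_pairs=2,socket_id=0");
 *
 * or from the EAL command line with --vdev "crypto_zuc,max_nb_queue_pairs=2".
 */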