1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2021 Intel Corporation
5 #include "pmd_snow3g_priv.h"
7 /** Parse crypto xform chain and set private session parameters. */
/*
 * NOTE(review): this extract is missing source lines (the function's
 * storage class/return type, error `return`s and closing braces are not
 * visible). Code lines below are byte-identical; only comments added.
 */
9 snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
10 const struct rte_crypto_sym_xform *xform)
12 struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
13 const struct rte_crypto_sym_xform *auth_xform = NULL;
14 const struct rte_crypto_sym_xform *cipher_xform = NULL;
15 enum ipsec_mb_operation mode;
17 /* Select Crypto operation - hash then cipher / cipher then hash */
18 int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
/* Cipher-side validation: algorithm, IV length, key size. */
24 /* Only SNOW 3G UEA2 supported */
25 if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
28 if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
29 IPSEC_MB_LOG(ERR, "Wrong IV length");
32 if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
33 IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
/* The IV lives in the crypto-op private area; record its offset. */
37 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
/* Expand the cipher key into the IMB key schedule once per session. */
40 IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
41 &sess->pKeySched_cipher);
/* Auth-side validation: algorithm, digest length, key size, IV. */
45 /* Only SNOW 3G UIA2 supported */
46 if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
49 if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
50 IPSEC_MB_LOG(ERR, "Wrong digest length");
53 if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
54 IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
58 sess->auth_op = auth_xform->auth.op;
60 if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
61 IPSEC_MB_LOG(ERR, "Wrong IV length");
64 sess->auth_iv_offset = auth_xform->auth.iv.offset;
/* Expand the auth key into the UIA2 (hash) key schedule. */
67 IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
68 &sess->pKeySched_hash);
76 /** Check if conditions are met for digest-appended operations */
78 snow3g_digest_appended_in_src(struct rte_crypto_op *op)
80 unsigned int auth_size, cipher_size;
/* auth/cipher data offsets and lengths are in bits; >> 3 converts
 * them to byte positions within the mbuf.
 */
82 auth_size = (op->sym->auth.data.offset >> 3) +
83 (op->sym->auth.data.length >> 3);
84 cipher_size = (op->sym->cipher.data.offset >> 3) +
85 (op->sym->cipher.data.length >> 3);
/* If the auth region ends before the cipher region does, the digest
 * appended after the auth data sits inside the cipher range -- return
 * a pointer to it in the source mbuf.
 */
87 if (auth_size < cipher_size)
88 return rte_pktmbuf_mtod_offset(op->sym->m_src,
89 uint8_t *, auth_size);
/* NOTE(review): the fall-through return (presumably NULL) is outside
 * this extract -- confirm against the full file.
 */
94 /** Encrypt/decrypt mbufs with same cipher key. */
/*
 * Batches up to SNOW3G_MAX_BURST ops into one IMB_SNOW3G_F8_N_BUFFER
 * call. Returns the number of ops handed to the multi-buffer library.
 * NOTE(review): extract is incomplete (loop braces, the rte_memcpy call
 * name and the processed_ops increment are not all visible).
 */
96 process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
97 struct snow3g_session *session,
101 uint8_t processed_ops = 0;
102 const void *src[SNOW3G_MAX_BURST] = {NULL};
103 void *dst[SNOW3G_MAX_BURST] = {NULL};
104 uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
105 const void *iv[SNOW3G_MAX_BURST] = {NULL};
106 uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
107 uint32_t cipher_off, cipher_len;
108 int unencrypted_bytes = 0;
110 for (i = 0; i < num_ops; i++) {
/* Cipher offset/length are given in bits; convert to bytes. */
112 cipher_off = ops[i]->sym->cipher.data.offset >> 3;
113 cipher_len = ops[i]->sym->cipher.data.length >> 3;
114 src[i] = rte_pktmbuf_mtod_offset(
115 ops[i]->sym->m_src, uint8_t *, cipher_off);
117 /* If out-of-place operation */
118 if (ops[i]->sym->m_dst &&
119 ops[i]->sym->m_src != ops[i]->sym->m_dst) {
120 dst[i] = rte_pktmbuf_mtod_offset(
121 ops[i]->sym->m_dst, uint8_t *, cipher_off);
123 /* In case of out-of-place, auth-cipher operation
124 * with partial encryption of the digest, copy
125 * the remaining, unencrypted part.
/* Bytes of (auth data + digest) extending past the cipher region. */
127 if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
128 || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
130 (ops[i]->sym->auth.data.offset >> 3) +
131 (ops[i]->sym->auth.data.length >> 3) +
132 (SNOW3G_DIGEST_LENGTH) -
133 cipher_off - cipher_len;
134 if (unencrypted_bytes > 0)
136 rte_pktmbuf_mtod_offset(
137 ops[i]->sym->m_dst, uint8_t *,
138 cipher_off + cipher_len),
139 rte_pktmbuf_mtod_offset(
140 ops[i]->sym->m_src, uint8_t *,
141 cipher_off + cipher_len),
/* In-place: destination is the source mbuf itself. */
144 dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
145 uint8_t *, cipher_off);
147 iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
148 session->cipher_iv_offset);
149 num_bytes[i] = cipher_len;
/* One multi-buffer UEA2 (F8) call for the whole gathered batch. */
153 IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
154 src, dst, num_bytes, processed_ops);
156 /* Take care of the raw digest data in src buffer */
157 for (i = 0; i < num_ops; i++) {
158 if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
159 session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
160 ops[i]->sym->m_dst != NULL) {
162 snow3g_digest_appended_in_src(ops[i]);
163 /* Clear unencrypted digest from
/* Zero the plaintext digest left in the source buffer so it
 * cannot leak alongside the encrypted output.
 */
166 if (digest_appended[i] != NULL)
167 memset(digest_appended[i],
168 0, SNOW3G_DIGEST_LENGTH);
171 return processed_ops;
174 /** Encrypt/decrypt mbuf (bit level function). */
/*
 * Single-op path for cipher lengths/offsets that are not byte aligned.
 * Requires an out-of-place operation (m_dst != m_src); in-place is
 * rejected with INVALID_ARGS. NOTE(review): error `return` statements
 * and closing braces are missing from this extract.
 */
176 process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
177 struct rte_crypto_op *op,
178 struct snow3g_session *session)
182 uint32_t length_in_bits, offset_in_bits;
183 int unencrypted_bytes = 0;
185 offset_in_bits = op->sym->cipher.data.offset;
186 src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
187 if (op->sym->m_dst == NULL) {
188 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
189 IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
192 length_in_bits = op->sym->cipher.data.length;
193 dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
194 /* In case of out-of-place, auth-cipher operation
195 * with partial encryption of the digest, copy
196 * the remaining, unencrypted part.
/* Bytes of (auth data + digest) extending past the cipher region,
 * with bit quantities converted to bytes via >> 3.
 */
198 if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
199 session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
201 (op->sym->auth.data.offset >> 3) +
202 (op->sym->auth.data.length >> 3) +
203 (SNOW3G_DIGEST_LENGTH) -
204 (offset_in_bits >> 3) -
205 (length_in_bits >> 3);
206 if (unencrypted_bytes > 0)
208 rte_pktmbuf_mtod_offset(
209 op->sym->m_dst, uint8_t *,
210 (length_in_bits >> 3)),
211 rte_pktmbuf_mtod_offset(
212 op->sym->m_src, uint8_t *,
213 (length_in_bits >> 3)),
216 iv = rte_crypto_op_ctod_offset(op, uint8_t *,
217 session->cipher_iv_offset);
/* Bit-exact UEA2 (F8) call on a single buffer. */
219 IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
220 src, dst, length_in_bits, offset_in_bits);
225 /** Generate/verify hash from mbufs with same hash key. */
/*
 * Runs UIA2 (F9) per op. VERIFY: compute into the qp scratch digest and
 * compare against the op's digest. GENERATE: write into the appended
 * digest in src (digest-encrypted chains) or into auth.digest.data.
 * Returns the number of successfully processed ops.
 * NOTE(review): extract is incomplete (continue/braces and part of the
 * session->op comparisons are missing).
 */
227 process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
228 struct snow3g_session *session,
232 uint8_t processed_ops = 0;
234 uint32_t length_in_bits;
236 uint8_t digest_appended = 0;
237 struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
239 for (i = 0; i < num_ops; i++) {
240 /* Data must be byte aligned */
241 if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
242 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
243 IPSEC_MB_LOG(ERR, "Offset");
249 length_in_bits = ops[i]->sym->auth.data.length;
/* auth offset is in bits; >> 3 converts to bytes. */
251 src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
252 (ops[i]->sym->auth.data.offset >> 3);
253 iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
254 session->auth_iv_offset);
256 if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
/* Compute into per-qp scratch, never into the packet. */
257 dst = qp_data->temp_digest;
258 /* Handle auth cipher verify oop case*/
260 IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
262 IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
263 ops[i]->sym->m_dst != NULL)
264 src = rte_pktmbuf_mtod_offset(
265 ops[i]->sym->m_dst, uint8_t *,
266 ops[i]->sym->auth.data.offset >> 3);
268 IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
269 &session->pKeySched_hash,
270 iv, src, length_in_bits, dst);
/* Constant digest length (4 bytes for UIA2). */
272 if (memcmp(dst, ops[i]->sym->auth.digest.data,
273 SNOW3G_DIGEST_LENGTH) != 0)
275 RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* GENERATE: digest-encrypted chains write the digest into the
 * source buffer right after the auth data, if it fits there.
 */
278 IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
280 IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
281 dst = snow3g_digest_appended_in_src(ops[i]);
286 dst = ops[i]->sym->auth.digest.data;
288 IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
289 &session->pKeySched_hash,
290 iv, src, length_in_bits, dst);
292 /* Copy back digest from src to auth.digest.data */
294 rte_memcpy(ops[i]->sym->auth.digest.data,
295 dst, SNOW3G_DIGEST_LENGTH);
300 return processed_ops;
303 /** Process a batch of crypto ops which shares the same session. */
/*
 * Dispatches the batch to the cipher and/or hash routines according to
 * the session's chained operation, then finalizes statuses and frees
 * session-less sessions. Returns the number of ops processed by the
 * first stage. NOTE(review): `break`s and some braces are missing from
 * this extract.
 */
305 process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
306 struct ipsec_mb_qp *qp, uint8_t num_ops)
309 uint32_t processed_ops;
311 #ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
/* Debug-only guard: this PMD handles contiguous mbufs only. */
312 for (i = 0; i < num_ops; i++) {
313 if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
314 (ops[i]->sym->m_dst != NULL &&
315 !rte_pktmbuf_is_contiguous(
316 ops[i]->sym->m_dst))) {
318 "PMD supports only contiguous mbufs, "
319 "op (%p) provides noncontiguous mbuf as "
320 "source/destination buffer.\n", ops[i]);
321 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
327 switch (session->op) {
328 case IPSEC_MB_OP_ENCRYPT_ONLY:
329 case IPSEC_MB_OP_DECRYPT_ONLY:
330 processed_ops = process_snow3g_cipher_op(qp, ops,
333 case IPSEC_MB_OP_HASH_GEN_ONLY:
334 case IPSEC_MB_OP_HASH_VERIFY_ONLY:
335 processed_ops = process_snow3g_hash_op(qp, ops, session,
/* Chained ops: second stage only runs on ops the first stage
 * accepted (processed_ops).
 */
338 case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
339 case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
340 processed_ops = process_snow3g_cipher_op(qp, ops, session,
342 process_snow3g_hash_op(qp, ops, session, processed_ops);
344 case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
345 case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
346 processed_ops = process_snow3g_hash_op(qp, ops, session,
348 process_snow3g_cipher_op(qp, ops, session, processed_ops);
351 /* Operation not supported. */
355 for (i = 0; i < num_ops; i++) {
357 * If there was no error/authentication failure,
358 * change status to successful.
360 if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
361 ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
362 /* Free session if a session-less crypto op. */
363 if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
/* Scrub key material before returning objects to the pools. */
364 memset(session, 0, sizeof(struct snow3g_session));
365 memset(ops[i]->sym->session, 0,
366 rte_cryptodev_sym_get_existing_header_session_size(
367 ops[i]->sym->session));
368 rte_mempool_put(qp->sess_mp_priv, session);
369 rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
370 ops[i]->sym->session = NULL;
373 return processed_ops;
376 /** Process a crypto op with length/offset in bits. */
/*
 * Single-op slow path used when cipher length/offset is not byte
 * aligned. Runs the bit-level cipher and/or hash, finalizes status,
 * frees a session-less session, and re-enqueues the op on the qp's
 * ingress ring for later dequeue. NOTE(review): `break`s, a return and
 * some braces are missing from this extract.
 */
378 process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
379 struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops)
381 uint32_t enqueued_op, processed_op;
383 switch (session->op) {
384 case IPSEC_MB_OP_ENCRYPT_ONLY:
385 case IPSEC_MB_OP_DECRYPT_ONLY:
387 processed_op = process_snow3g_cipher_op_bit(qp, op,
390 case IPSEC_MB_OP_HASH_GEN_ONLY:
391 case IPSEC_MB_OP_HASH_VERIFY_ONLY:
392 processed_op = process_snow3g_hash_op(qp, &op, session, 1);
/* Chained ops: run the second stage only if the first succeeded. */
394 case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
395 case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
396 processed_op = process_snow3g_cipher_op_bit(qp, op, session);
397 if (processed_op == 1)
398 process_snow3g_hash_op(qp, &op, session, 1);
400 case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
401 case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
402 processed_op = process_snow3g_hash_op(qp, &op, session, 1);
403 if (processed_op == 1)
404 process_snow3g_cipher_op_bit(qp, op, session);
407 /* Operation not supported. */
412 * If there was no error/authentication failure,
413 * change status to successful.
415 if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
416 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
418 /* Free session if a session-less crypto op. */
419 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
420 memset(op->sym->session, 0, sizeof(struct snow3g_session));
421 rte_cryptodev_sym_session_free(op->sym->session);
422 op->sym->session = NULL;
/* Hand the processed op back via the ingress ring; dequeue_burst
 * counts it through the ring, so update the stats here.
 */
425 enqueued_op = rte_ring_enqueue_burst(qp->ingress_queue,
426 (void **)&op, processed_op, NULL);
427 qp->stats.enqueued_count += enqueued_op;
428 *accumulated_enqueued_ops += enqueued_op;
/*
 * Burst dequeue entry point: pulls ops from the qp ingress ring,
 * batches consecutive ops sharing a session (up to SNOW3G_MAX_BURST)
 * through process_ops(), and diverts bit-level ops to the single-op
 * process_op_bit() path. NOTE(review): this extract is missing error
 * handling lines, the bit-offset half of the alignment test, braces and
 * the final return -- code lines are kept byte-identical.
 */
434 snow3g_pmd_dequeue_burst(void *queue_pair,
435 struct rte_crypto_op **ops, uint16_t nb_ops)
437 struct ipsec_mb_qp *qp = queue_pair;
438 struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
439 struct rte_crypto_op *curr_c_op;
441 struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
443 uint8_t burst_size = 0;
444 uint16_t enqueued_ops = 0;
445 uint8_t processed_ops;
446 uint32_t nb_dequeued;
448 nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
449 (void **)ops, nb_ops, NULL);
451 for (i = 0; i < nb_dequeued; i++) {
454 /* Set status as enqueued (not processed yet) by default. */
455 curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
457 curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
458 if (unlikely(curr_sess == NULL ||
459 curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
461 RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
465 /* If length/offset is at bit-level,
466 * process this buffer alone.
468 if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
469 || ((curr_c_op->sym->cipher.data.offset
471 /* Process the ops of the previous session. */
472 if (prev_sess != NULL) {
473 processed_ops = process_ops(c_ops, prev_sess,
475 if (processed_ops < burst_size) {
/* Unaligned op goes through the single-op bit path. */
484 processed_ops = process_op_bit(curr_c_op, curr_sess,
486 if (processed_ops != 1)
492 /* Batch ops that share the same session. */
493 if (prev_sess == NULL) {
494 prev_sess = curr_sess;
495 c_ops[burst_size++] = curr_c_op;
496 } else if (curr_sess == prev_sess) {
497 c_ops[burst_size++] = curr_c_op;
499 * When there are enough ops to process in a batch,
500 * process them, and start a new batch.
502 if (burst_size == SNOW3G_MAX_BURST) {
503 processed_ops = process_ops(c_ops, prev_sess,
505 if (processed_ops < burst_size) {
515 * Different session, process the ops
516 * of the previous session.
518 processed_ops = process_ops(c_ops, prev_sess,
520 if (processed_ops < burst_size) {
/* Start a new batch with the current session's op. */
526 prev_sess = curr_sess;
528 c_ops[burst_size++] = curr_c_op;
532 if (burst_size != 0) {
533 /* Process the crypto ops of the last session. */
534 processed_ops = process_ops(c_ops, prev_sess,
538 qp->stats.dequeued_count += i;
/* Cryptodev op table: this PMD reuses the generic ipsec_mb framework
 * callbacks for device, stats, queue-pair and session management.
 */
542 struct rte_cryptodev_ops snow3g_pmd_ops = {
543 .dev_configure = ipsec_mb_config,
544 .dev_start = ipsec_mb_start,
545 .dev_stop = ipsec_mb_stop,
546 .dev_close = ipsec_mb_close,
548 .stats_get = ipsec_mb_stats_get,
549 .stats_reset = ipsec_mb_stats_reset,
551 .dev_infos_get = ipsec_mb_info_get,
553 .queue_pair_setup = ipsec_mb_qp_setup,
554 .queue_pair_release = ipsec_mb_qp_release,
556 .sym_session_get_size = ipsec_mb_sym_session_get_size,
557 .sym_session_configure = ipsec_mb_sym_session_configure,
558 .sym_session_clear = ipsec_mb_sym_session_clear
/* Exported pointer to the op table, consumed by the framework. */
561 struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
/* vdev probe callback: delegate device creation to the ipsec_mb
 * framework, identifying this driver as the SNOW 3G PMD.
 */
564 snow3g_probe(struct rte_vdev_device *vdev)
566 return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_SNOW3G);
/* Virtual-device driver object plus DPDK registration macros:
 * vdev name, legacy alias, accepted devargs, and the crypto driver id.
 */
569 static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
570 .probe = snow3g_probe,
571 .remove = ipsec_mb_remove
574 static struct cryptodev_driver snow3g_crypto_drv;
576 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
577 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
578 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
579 "max_nb_queue_pairs=<int> socket_id=<int>");
580 RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
581 cryptodev_snow3g_pmd_drv.driver,
582 pmd_driver_id_snow3g);
584 /* Constructor function to register snow3g PMD */
/* Runs at startup; fills this PMD's slot in the shared ipsec_mb PMD
 * table with its capabilities, entry points and per-object sizes.
 */
585 RTE_INIT(ipsec_mb_register_snow3g)
587 struct ipsec_mb_internals *snow3g_data
588 = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
590 snow3g_data->caps = snow3g_capabilities;
591 snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
592 snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
593 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
594 RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
595 RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
596 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
597 RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
598 snow3g_data->internals_priv_size = 0;
599 snow3g_data->ops = &snow3g_pmd_ops;
600 snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
601 snow3g_data->session_configure = snow3g_session_configure;
602 snow3g_data->session_priv_size = sizeof(struct snow3g_session);