crypto/ipsec_mb: move snow3g PMD
drivers/crypto/ipsec_mb/pmd_snow3g.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2021 Intel Corporation
 */

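/* Session and queue pair private data layouts, the SNOW3G_* constants and
 * the capability table used below are provided by pmd_snow3g_priv.h.
 */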
#include "pmd_snow3g_priv.h"

/** Parse crypto xform chain and set private session parameters. */
static int
snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
		const struct rte_crypto_sym_xform *xform)
{
	struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum ipsec_mb_operation mode;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
				&cipher_xform, NULL);
	if (ret)
		return ret;

	if (cipher_xform) {
		/* Only SNOW 3G UEA2 supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
			return -ENOTSUP;

		if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
			return -ENOMEM;
		}

		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;

		/* Initialize key */
		IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
					&sess->pKeySched_cipher);
	}

	if (auth_xform) {
		/* Only SNOW 3G UIA2 supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
			return -ENOTSUP;

		if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong digest length");
			return -EINVAL;
		}
		if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
			return -ENOMEM;
		}

		sess->auth_op = auth_xform->auth.op;

		if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
			IPSEC_MB_LOG(ERR, "Wrong IV length");
			return -EINVAL;
		}
		sess->auth_iv_offset = auth_xform->auth.iv.offset;

		/* Initialize key */
		IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
					&sess->pKeySched_hash);
	}

	sess->op = mode;

	return 0;
}

/** Encrypt/decrypt mbufs with same cipher key. */
static uint8_t
process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	uint32_t i;
	uint8_t processed_ops = 0;
	const void *src[SNOW3G_MAX_BURST];
	void *dst[SNOW3G_MAX_BURST];
	const void *iv[SNOW3G_MAX_BURST];
	uint32_t num_bytes[SNOW3G_MAX_BURST];

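	/*
	 * Gather per-op parameters. For SNOW 3G UEA2 the cipher offset and
	 * length are expressed in bits, so shift right by 3 to address bytes.
	 */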
	for (i = 0; i < num_ops; i++) {
		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		dst[i] = ops[i]->sym->m_dst ?
			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3) :
			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->cipher_iv_offset);
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

		processed_ops++;
	}

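	/*
	 * One multi-buffer call ciphers all gathered buffers with the same
	 * key schedule, using a separate IV and length per buffer.
	 */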
	IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
			src, dst, num_bytes, processed_ops);

	return processed_ops;
}

/** Encrypt/decrypt mbuf (bit level function). */
static uint8_t
process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op,
		struct snow3g_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv;
	uint32_t length_in_bits, offset_in_bits;

	offset_in_bits = op->sym->cipher.data.offset;
	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	if (op->sym->m_dst == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		IPSEC_MB_LOG(ERR, "bit-level in-place not supported");
		return 0;
	}
	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->cipher_iv_offset);
	length_in_bits = op->sym->cipher.data.length;

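	/*
	 * Bit-granular single-buffer kernel: unlike the byte-oriented
	 * multi-buffer path, this handles offsets and lengths that are
	 * not multiples of 8 bits.
	 */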
	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
			src, dst, length_in_bits, offset_in_bits);

	return 1;
}

/** Generate/verify hash from mbufs with same hash key. */
static int
process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	uint32_t i;
	uint8_t processed_ops = 0;
	uint8_t *src, *dst;
	uint32_t length_in_bits;
	uint8_t *iv;
	struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);

	for (i = 0; i < num_ops; i++) {
		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			IPSEC_MB_LOG(ERR, "Auth data offset must be byte-aligned");
			break;
		}

		length_in_bits = ops[i]->sym->auth.data.length;

		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->auth.data.offset >> 3);
		iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->auth_iv_offset);

		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
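			/*
			 * Compute the digest into per-queue-pair scratch
			 * space and compare it with the digest supplied in
			 * the op; the op's digest buffer is left untouched.
			 */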
			dst = qp_data->temp_digest;

			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
					&session->pKeySched_hash,
					iv, src, length_in_bits, dst);
			/* Verify digest. */
			if (memcmp(dst, ops[i]->sym->auth.digest.data,
					SNOW3G_DIGEST_LENGTH) != 0)
				ops[i]->status =
					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			dst = ops[i]->sym->auth.digest.data;

			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
					&session->pKeySched_hash,
					iv, src, length_in_bits, dst);
		}
		processed_ops++;
	}

	return processed_ops;
}

/** Process a batch of crypto ops which shares the same session. */
static int
process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
		struct ipsec_mb_qp *qp, uint8_t num_ops)
{
	uint32_t i;
	uint32_t processed_ops;

#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
	for (i = 0; i < num_ops; i++) {
		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
				(ops[i]->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						ops[i]->sym->m_dst))) {
			IPSEC_MB_LOG(ERR,
				"PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf as "
				"source/destination buffer.", ops[i]);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	}
#endif

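	/*
	 * Dispatch on the operation mode resolved at session setup:
	 * cipher only, hash only, or cipher and hash chained.
	 */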
	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:
		processed_ops = process_snow3g_cipher_op(qp, ops,
				session, num_ops);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		processed_ops = process_snow3g_cipher_op(qp, ops, session,
				num_ops);
		process_snow3g_hash_op(qp, ops, session, processed_ops);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		process_snow3g_cipher_op(qp, ops, session, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(session, 0, sizeof(struct snow3g_session));
			memset(ops[i]->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
					ops[i]->sym->session));
			rte_mempool_put(qp->sess_mp_priv, session);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}
	return processed_ops;
}

/** Process a crypto op with length/offset in bits. */
static int
process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
		struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops)
{
	uint32_t processed_op;

	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:

		processed_op = process_snow3g_cipher_op_bit(qp, op,
				session);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		processed_op = process_snow3g_cipher_op_bit(qp, op, session);
		if (processed_op == 1)
			process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		if (processed_op == 1)
			process_snow3g_cipher_op_bit(qp, op, session);
		break;
	default:
		/* Operation not supported. */
		processed_op = 0;
	}

	/*
	 * If there was no error/authentication failure,
	 * change status to successful.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Free session if a session-less crypto op. */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(op->sym->session, 0, sizeof(struct snow3g_session));
		rte_cryptodev_sym_session_free(op->sym->session);
		op->sym->session = NULL;
	}

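	/*
	 * The op is already in the caller's ops[] array from the dequeue
	 * path; only account for it here.
	 */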
	if (unlikely(processed_op != 1))
		return 0;

	*accumulated_enqueued_ops += 1;

	return 1;
}

static uint16_t
snow3g_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct ipsec_mb_qp *qp = queue_pair;
	struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
	struct rte_crypto_op *curr_c_op;

	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
	uint32_t i;
	uint8_t burst_size = 0;
	uint16_t enqueued_ops = 0;
	uint8_t processed_ops;
	uint32_t nb_dequeued;

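	/*
	 * Ops were placed on the ingress ring by the common ipsec_mb
	 * enqueue path; pull up to nb_ops of them and process them now.
	 */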
	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		curr_c_op = ops[i];

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
		if (unlikely(curr_sess == NULL ||
				curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level,
		 * process this buffer alone.
		 */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((curr_c_op->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			/* Process the ops of the previous session. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op, curr_sess,
							qp, &enqueued_ops);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == SNOW3G_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess,
					qp, burst_size);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess,
				qp, burst_size);
	}

	qp->stats.dequeued_count += i;
	return i;
}

struct rte_cryptodev_ops snow3g_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};

struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;

static int
snow3g_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_SNOW3G);
}

static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
	.probe = snow3g_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver snow3g_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
			       "max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
				cryptodev_snow3g_pmd_drv.driver,
				pmd_driver_id_snow3g);

/* Constructor function to register snow3g PMD */
RTE_INIT(ipsec_mb_register_snow3g)
{
	struct ipsec_mb_internals *snow3g_data
		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];

	snow3g_data->caps = snow3g_capabilities;
	snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
	snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
	snow3g_data->internals_priv_size = 0;
	snow3g_data->ops = &snow3g_pmd_ops;
	snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
	snow3g_data->session_configure = snow3g_session_configure;
	snow3g_data->session_priv_size = sizeof(struct snow3g_session);
}