/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_cryptodev_pmd.h>

#include "ccp_dev.h"
#include "ccp_crypto.h"
#include "ccp_pci.h"
#include "ccp_pmd_private.h"

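/*
 * Map an xform chain onto a CCP command order: a lone auth or cipher
 * xform maps to CCP_CMD_AUTH/CCP_CMD_CIPHER, a two-element chain maps
 * to CCP_CMD_HASH_CIPHER or CCP_CMD_CIPHER_HASH depending on which
 * element comes first, and an AEAD xform maps to CCP_CMD_COMBINED.
 */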
static enum ccp_cmd_order
ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
        enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;

        if (xform == NULL)
                return res;
        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (xform->next == NULL)
                        return CCP_CMD_AUTH;
                else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
                        return CCP_CMD_HASH_CIPHER;
        }
        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                if (xform->next == NULL)
                        return CCP_CMD_CIPHER;
                else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
                        return CCP_CMD_CIPHER_HASH;
        }
        if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
                return CCP_CMD_COMBINED;
        return res;
}

/* configure session */
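/*
 * Fill in the cipher half of a session: direction, key and IV layout
 * come straight from the xform. Algorithm and engine selection happen
 * in the two switches below; no cipher algorithms are wired up at this
 * stage of the driver, so the first switch rejects every request.
 */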
static int
ccp_configure_session_cipher(struct ccp_session *sess,
                             const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_cipher_xform *cipher_xform = NULL;

        cipher_xform = &xform->cipher;

        /* set cipher direction */
        if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
                sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
        else
                sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;

        /* set cipher key */
        sess->cipher.key_length = cipher_xform->key.length;
        rte_memcpy(sess->cipher.key, cipher_xform->key.data,
                   cipher_xform->key.length);

        /* set iv parameters */
        sess->iv.offset = cipher_xform->iv.offset;
        sess->iv.length = cipher_xform->iv.length;

        switch (cipher_xform->algo) {
        default:
                CCP_LOG_ERR("Unsupported cipher algo");
                return -1;
        }

        switch (sess->cipher.engine) {
        default:
                CCP_LOG_ERR("Invalid CCP Engine");
                return -ENOTSUP;
        }
        return 0;
}

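/*
 * Fill in the auth half of a session: digest length and the
 * generate/verify direction come from the xform. No hash algorithms
 * are wired up yet, so the switch rejects every request with -ENOTSUP.
 */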
static int
ccp_configure_session_auth(struct ccp_session *sess,
                           const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_auth_xform *auth_xform = NULL;

        auth_xform = &xform->auth;

        sess->auth.digest_length = auth_xform->digest_length;
        if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
                sess->auth.op = CCP_AUTH_OP_GENERATE;
        else
                sess->auth.op = CCP_AUTH_OP_VERIFY;
        switch (auth_xform->algo) {
        default:
                CCP_LOG_ERR("Unsupported hash algo");
                return -ENOTSUP;
        }
        return 0;
}

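/*
 * Fill in both cipher and auth state for an AEAD session: key,
 * direction, AAD/digest lengths and IV layout. As with the other
 * configure helpers, no algorithms are wired up yet.
 */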
static int
ccp_configure_session_aead(struct ccp_session *sess,
                           const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_aead_xform *aead_xform = NULL;

        aead_xform = &xform->aead;

        sess->cipher.key_length = aead_xform->key.length;
        rte_memcpy(sess->cipher.key, aead_xform->key.data,
                   aead_xform->key.length);

        if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
                sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
                sess->auth.op = CCP_AUTH_OP_GENERATE;
        } else {
                sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
                sess->auth.op = CCP_AUTH_OP_VERIFY;
        }
        sess->auth.aad_length = aead_xform->aad_length;
        sess->auth.digest_length = aead_xform->digest_length;

        /* set iv parameters */
        sess->iv.offset = aead_xform->iv.offset;
        sess->iv.length = aead_xform->iv.length;

        switch (aead_xform->algo) {
        default:
                CCP_LOG_ERR("Unsupported aead algo");
                return -ENOTSUP;
        }
        return 0;
}

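/*
 * Resolve an xform chain into per-session state. The chain is first
 * classified by ccp_get_cmd_id(), then each component xform is passed
 * to its configure helper; the first failure aborts session setup.
 */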
int
ccp_set_session_parameters(struct ccp_session *sess,
                           const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *cipher_xform = NULL;
        const struct rte_crypto_sym_xform *auth_xform = NULL;
        const struct rte_crypto_sym_xform *aead_xform = NULL;
        int ret = 0;

        sess->cmd_id = ccp_get_cmd_id(xform);

        switch (sess->cmd_id) {
        case CCP_CMD_CIPHER:
                cipher_xform = xform;
                break;
        case CCP_CMD_AUTH:
                auth_xform = xform;
                break;
        case CCP_CMD_CIPHER_HASH:
                cipher_xform = xform;
                auth_xform = xform->next;
                break;
        case CCP_CMD_HASH_CIPHER:
                auth_xform = xform;
                cipher_xform = xform->next;
                break;
        case CCP_CMD_COMBINED:
                aead_xform = xform;
                break;
        default:
                CCP_LOG_ERR("Unsupported cmd_id");
                return -1;
        }

        /* Default IV length = 0 */
        sess->iv.length = 0;
        if (cipher_xform) {
                ret = ccp_configure_session_cipher(sess, cipher_xform);
                if (ret != 0) {
                        CCP_LOG_ERR("Invalid/unsupported cipher parameters");
                        return ret;
                }
        }
        if (auth_xform) {
                ret = ccp_configure_session_auth(sess, auth_xform);
                if (ret != 0) {
                        CCP_LOG_ERR("Invalid/unsupported auth parameters");
                        return ret;
                }
        }
        if (aead_xform) {
                ret = ccp_configure_session_aead(sess, aead_xform);
                if (ret != 0) {
                        CCP_LOG_ERR("Invalid/unsupported aead parameters");
                        return ret;
                }
        }
        return ret;
}

/* calculate CCP descriptors requirement */
static inline int
ccp_cipher_slot(struct ccp_session *session)
{
        int count = 0;

        switch (session->cipher.algo) {
        default:
                CCP_LOG_ERR("Unsupported cipher algo %d",
                            session->cipher.algo);
        }
        return count;
}

static inline int
ccp_auth_slot(struct ccp_session *session)
{
        int count = 0;

        switch (session->auth.algo) {
        default:
                CCP_LOG_ERR("Unsupported auth algo %d",
                            session->auth.algo);
        }
        return count;
}

static int
ccp_aead_slot(struct ccp_session *session)
{
        int count = 0;

        switch (session->aead_algo) {
        default:
                CCP_LOG_ERR("Unsupported aead algo %d",
                            session->aead_algo);
        }
        return count;
}

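/*
 * Return the total number of CCP descriptor slots one op of this
 * session will consume. Chained cipher+hash commands need the sum of
 * both parts; all other command types map to a single helper.
 */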
int
ccp_compute_slot_count(struct ccp_session *session)
{
        int count = 0;

        switch (session->cmd_id) {
        case CCP_CMD_CIPHER:
                count = ccp_cipher_slot(session);
                break;
        case CCP_CMD_AUTH:
                count = ccp_auth_slot(session);
                break;
        case CCP_CMD_CIPHER_HASH:
        case CCP_CMD_HASH_CIPHER:
                count = ccp_cipher_slot(session);
                count += ccp_auth_slot(session);
                break;
        case CCP_CMD_COMBINED:
                count = ccp_aead_slot(session);
                break;
        default:
                CCP_LOG_ERR("Unsupported cmd_id");
        }

        return count;
}

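/*
 * Build the CCP descriptors for the cipher portion of an op. cmd_q and
 * b_info are unused until per-algorithm descriptor generation is added;
 * for now every algorithm is rejected with -ENOTSUP. The auth and aead
 * helpers below follow the same pattern.
 */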
static inline int
ccp_crypto_cipher(struct rte_crypto_op *op,
                  struct ccp_queue *cmd_q __rte_unused,
                  struct ccp_batch_info *b_info __rte_unused)
{
        int result = 0;
        struct ccp_session *session;

        session = (struct ccp_session *)get_session_private_data(
                                         op->sym->session,
                                         ccp_cryptodev_driver_id);

        switch (session->cipher.algo) {
        default:
                CCP_LOG_ERR("Unsupported cipher algo %d",
                            session->cipher.algo);
                return -ENOTSUP;
        }
        return result;
}

static inline int
ccp_crypto_auth(struct rte_crypto_op *op,
                struct ccp_queue *cmd_q __rte_unused,
                struct ccp_batch_info *b_info __rte_unused)
{
        int result = 0;
        struct ccp_session *session;

        session = (struct ccp_session *)get_session_private_data(
                                         op->sym->session,
                                         ccp_cryptodev_driver_id);

        switch (session->auth.algo) {
        default:
                CCP_LOG_ERR("Unsupported auth algo %d",
                            session->auth.algo);
                return -ENOTSUP;
        }

        return result;
}

static inline int
ccp_crypto_aead(struct rte_crypto_op *op,
                struct ccp_queue *cmd_q __rte_unused,
                struct ccp_batch_info *b_info __rte_unused)
{
        int result = 0;
        struct ccp_session *session;

        session = (struct ccp_session *)get_session_private_data(
                                         op->sym->session,
                                         ccp_cryptodev_driver_id);

        switch (session->aead_algo) {
        default:
                CCP_LOG_ERR("Unsupported aead algo %d",
                            session->aead_algo);
                return -ENOTSUP;
        }
        return result;
}

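/*
 * Translate up to nb_ops crypto ops into CCP descriptors on cmd_q, then
 * kick the queue by writing the new tail offset and re-enabling
 * CMD_Q_RUN. slots_req descriptor slots are reserved up front; if
 * descriptor generation fails part-way, the unused reservation is
 * returned to free_slots. Returns the number of ops actually accepted.
 */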
int
process_ops_to_enqueue(const struct ccp_qp *qp,
                       struct rte_crypto_op **op,
                       struct ccp_queue *cmd_q,
                       uint16_t nb_ops,
                       int slots_req)
{
        int i, result = 0;
        struct ccp_batch_info *b_info;
        struct ccp_session *session;

        if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
                CCP_LOG_ERR("batch info allocation failed");
                return 0;
        }
        /* populate batch info necessary for dequeue */
        b_info->op_idx = 0;
        b_info->lsb_buf_idx = 0;
        b_info->desccnt = 0;
        b_info->cmd_q = cmd_q;
        b_info->lsb_buf_phys =
                (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
        rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);

        b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
                                         Q_DESC_SIZE);
        for (i = 0; i < nb_ops; i++) {
                session = (struct ccp_session *)get_session_private_data(
                                                 op[i]->sym->session,
                                                 ccp_cryptodev_driver_id);
                switch (session->cmd_id) {
                case CCP_CMD_CIPHER:
                        result = ccp_crypto_cipher(op[i], cmd_q, b_info);
                        break;
                case CCP_CMD_AUTH:
                        result = ccp_crypto_auth(op[i], cmd_q, b_info);
                        break;
                case CCP_CMD_CIPHER_HASH:
                        result = ccp_crypto_cipher(op[i], cmd_q, b_info);
                        if (result)
                                break;
                        result = ccp_crypto_auth(op[i], cmd_q, b_info);
                        break;
                case CCP_CMD_HASH_CIPHER:
                        result = ccp_crypto_auth(op[i], cmd_q, b_info);
                        if (result)
                                break;
                        result = ccp_crypto_cipher(op[i], cmd_q, b_info);
                        break;
                case CCP_CMD_COMBINED:
                        result = ccp_crypto_aead(op[i], cmd_q, b_info);
                        break;
                default:
                        CCP_LOG_ERR("Unsupported cmd_id");
                        result = -1;
                }
                if (unlikely(result < 0)) {
                        rte_atomic64_add(&b_info->cmd_q->free_slots,
                                         (slots_req - b_info->desccnt));
                        break;
                }
                b_info->op[i] = op[i];
        }

        b_info->opcnt = i;
        b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
                                         Q_DESC_SIZE);

        rte_wmb();
        /* Write the new tail address back to the queue register */
        CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
                      b_info->tail_offset);
        /* Turn the queue back on using our cached control register */
        CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
                      cmd_q->qcontrol | CMD_Q_RUN);

        rte_ring_enqueue(qp->processed_pkts, (void *)b_info);

        return i;
}

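/*
 * Post-process the digest of a completed op. The CCP leaves its hash
 * context at the tail of the source mbuf; for SHA types other than
 * SHA1/224/256 the digest is stored byte-reversed and must be swapped
 * by the host. Verify ops compare the computed digest against the one
 * supplied with the op, generate ops copy it out, and the scratch area
 * is trimmed from the mbuf afterwards.
 */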
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
        struct ccp_session *session;
        uint8_t *digest_data, *addr;
        struct rte_mbuf *m_last;
        int offset, digest_offset;
        uint8_t digest_le[64];

        session = (struct ccp_session *)get_session_private_data(
                                         op->sym->session,
                                         ccp_cryptodev_driver_id);

        if (session->cmd_id == CCP_CMD_COMBINED) {
                digest_data = op->sym->aead.digest.data;
                digest_offset = op->sym->aead.data.offset +
                                        op->sym->aead.data.length;
        } else {
                digest_data = op->sym->auth.digest.data;
                digest_offset = op->sym->auth.data.offset +
                                        op->sym->auth.data.length;
        }
        m_last = rte_pktmbuf_lastseg(op->sym->m_src);
        addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
                           m_last->data_len - session->auth.ctx_len);

        rte_mb();
        offset = session->auth.offset;

        if (session->auth.engine == CCP_ENGINE_SHA)
                if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
                    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
                    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
                        /* All other algorithms require byte
                         * swap done by host
                         */
                        unsigned int i;

                        offset = session->auth.ctx_len -
                                session->auth.offset - 1;
                        for (i = 0; i < session->auth.digest_length; i++)
                                digest_le[i] = addr[offset - i];
                        offset = 0;
                        addr = digest_le;
                }

        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        if (session->auth.op == CCP_AUTH_OP_VERIFY) {
                if (memcmp(addr + offset, digest_data,
                           session->auth.digest_length) != 0)
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        } else {
                if (unlikely(digest_data == NULL))
                        digest_data = rte_pktmbuf_mtod_offset(
                                        op->sym->m_dst, uint8_t *,
                                        digest_offset);
                rte_memcpy(digest_data, addr + offset,
                           session->auth.digest_length);
        }
        /* Trim area used for digest from mbuf. */
        rte_pktmbuf_trim(op->sym->m_src,
                         session->auth.ctx_len);
}

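/*
 * Hand up to nb_ops completed ops from a batch back to the caller,
 * setting each op's status (and finalizing digests on the auth paths)
 * along the way. Returns the number of ops delivered.
 */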
static int
ccp_prepare_ops(struct rte_crypto_op **op_d,
                struct ccp_batch_info *b_info,
                uint16_t nb_ops)
{
        int i, min_ops;
        struct ccp_session *session;

        min_ops = RTE_MIN(nb_ops, b_info->opcnt);

        for (i = 0; i < min_ops; i++) {
                op_d[i] = b_info->op[b_info->op_idx++];
                session = (struct ccp_session *)get_session_private_data(
                                                 op_d[i]->sym->session,
                                                 ccp_cryptodev_driver_id);
                switch (session->cmd_id) {
                case CCP_CMD_CIPHER:
                        op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        break;
                case CCP_CMD_AUTH:
                case CCP_CMD_CIPHER_HASH:
                case CCP_CMD_HASH_CIPHER:
                case CCP_CMD_COMBINED:
                        ccp_auth_dq_prepare(op_d[i]);
                        break;
                default:
                        CCP_LOG_ERR("Unsupported cmd_id");
                }
        }

        b_info->opcnt -= min_ops;
        return min_ops;
}

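/*
 * Dequeue completed ops for one batch. Completion is judged by
 * comparing the hardware head pointer against the batch's recorded
 * head/tail window; a batch still in flight is parked on qp->b_info
 * and re-examined on the next poll.
 */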
int
process_ops_to_dequeue(struct ccp_qp *qp,
                       struct rte_crypto_op **op,
                       uint16_t nb_ops)
{
        struct ccp_batch_info *b_info;
        uint32_t cur_head_offset;

        if (qp->b_info != NULL) {
                b_info = qp->b_info;
                if (unlikely(b_info->op_idx > 0))
                        goto success;
        } else if (rte_ring_dequeue(qp->processed_pkts,
                                    (void **)&b_info))
                return 0;
        cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
                                       CMD_Q_HEAD_LO_BASE);

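        /*
         * The descriptor ring is circular. If the batch did not wrap,
         * a hardware head inside [head, tail) means descriptors are
         * still pending; once it wraps, the pending window is the
         * complement of that range.
         */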
        if (b_info->head_offset < b_info->tail_offset) {
                if ((cur_head_offset >= b_info->head_offset) &&
                    (cur_head_offset < b_info->tail_offset)) {
                        qp->b_info = b_info;
                        return 0;
                }
        } else {
                if ((cur_head_offset >= b_info->head_offset) ||
                    (cur_head_offset < b_info->tail_offset)) {
                        qp->b_info = b_info;
                        return 0;
                }
        }

success:
        nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
        rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
        b_info->desccnt = 0;
        if (b_info->opcnt > 0) {
                qp->b_info = b_info;
        } else {
                rte_mempool_put(qp->batch_mp, (void *)b_info);
                qp->b_info = NULL;
        }

        return nb_ops;
}