1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
10 #include <sys/queue.h>
11 #include <sys/types.h>
14 #include <rte_hexdump.h>
15 #include <rte_memzone.h>
16 #include <rte_malloc.h>
17 #include <rte_memory.h>
18 #include <rte_spinlock.h>
19 #include <rte_string_fns.h>
20 #include <rte_cryptodev_pmd.h>
23 #include "ccp_crypto.h"
25 #include "ccp_pmd_private.h"
/*
 * Classify an xform chain into the CCP command ordering this PMD uses.
 * Returns CCP_CMD_CIPHER / CCP_CMD_CIPHER_HASH / CCP_CMD_HASH_CIPHER /
 * CCP_CMD_COMBINED (AEAD) based on the first xform's type and, for chains,
 * the type of xform->next; falls back to CCP_CMD_NOT_SUPPORTED.
 * NOTE(review): several lines of this function are not visible in this
 * extract (auth-only return, closing braces) — do not infer behavior for
 * the missing branches.
 */
27 static enum ccp_cmd_order
28 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
30 enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
/* Auth first: auth-only or auth-then-cipher chain. */
34 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
35 if (xform->next == NULL)
37 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
38 return CCP_CMD_HASH_CIPHER;
/* Cipher first: cipher-only or cipher-then-auth chain. */
40 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
41 if (xform->next == NULL)
42 return CCP_CMD_CIPHER;
43 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
44 return CCP_CMD_CIPHER_HASH;
/* AEAD maps to the combined cipher+hash command. */
46 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
47 return CCP_CMD_COMBINED;
51 /* configure session */
/*
 * Populate the cipher part of a CCP session from a cipher xform:
 * direction, key, IV offset/length, algorithm/mode/engine, and the
 * physical addresses the hardware will DMA from.
 * NOTE(review): interior lines (break statements, error returns, the
 * AES case label of the engine switch) are missing from this extract.
 */
53 ccp_configure_session_cipher(struct ccp_session *sess,
54 const struct rte_crypto_sym_xform *xform)
56 const struct rte_crypto_cipher_xform *cipher_xform = NULL;
59 cipher_xform = &xform->cipher;
61 /* set cipher direction */
62 if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
63 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
65 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
/* Copy the raw key as supplied by the application. */
68 sess->cipher.key_length = cipher_xform->key.length;
69 rte_memcpy(sess->cipher.key, cipher_xform->key.data,
70 cipher_xform->key.length);
72 /* set iv parameters */
73 sess->iv.offset = cipher_xform->iv.offset;
74 sess->iv.length = cipher_xform->iv.length;
/* Map the rte_crypto algo to CCP algo / AES mode / engine. */
76 switch (cipher_xform->algo) {
77 case RTE_CRYPTO_CIPHER_AES_CTR:
78 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
79 sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
80 sess->cipher.engine = CCP_ENGINE_AES;
82 case RTE_CRYPTO_CIPHER_AES_ECB:
/* ECB reuses the CBC algo enum; the mode field distinguishes it. */
83 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
84 sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
85 sess->cipher.engine = CCP_ENGINE_AES;
87 case RTE_CRYPTO_CIPHER_AES_CBC:
88 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
89 sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
90 sess->cipher.engine = CCP_ENGINE_AES;
93 CCP_LOG_ERR("Unsupported cipher algo");
/* Derive the CCP AES key-type from the key length (128/192/256 bit). */
98 switch (sess->cipher.engine) {
100 if (sess->cipher.key_length == 16)
101 sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
102 else if (sess->cipher.key_length == 24)
103 sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
104 else if (sess->cipher.key_length == 32)
105 sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
107 CCP_LOG_ERR("Invalid cipher key length");
/* CCP expects the key byte-reversed; build key_ccp accordingly. */
110 for (i = 0; i < sess->cipher.key_length ; i++)
111 sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
115 CCP_LOG_ERR("Invalid CCP Engine");
/* Resolve physical addresses once at session setup for later DMA. */
118 sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
119 sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
/*
 * Populate the auth part of a CCP session from an auth xform:
 * digest length and generate/verify direction, then an algorithm
 * switch. Only the default (unsupported) case is visible in this
 * extract; the supported hash algorithms' cases are missing lines.
 */
124 ccp_configure_session_auth(struct ccp_session *sess,
125 const struct rte_crypto_sym_xform *xform)
127 const struct rte_crypto_auth_xform *auth_xform = NULL;
129 auth_xform = &xform->auth;
131 sess->auth.digest_length = auth_xform->digest_length;
132 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
133 sess->auth.op = CCP_AUTH_OP_GENERATE;
135 sess->auth.op = CCP_AUTH_OP_VERIFY;
136 switch (auth_xform->algo) {
138 CCP_LOG_ERR("Unsupported hash algo");
/*
 * Populate a CCP session from an AEAD xform: key, cipher direction plus
 * matching auth op (encrypt=>generate digest, decrypt=>verify digest),
 * AAD/digest lengths, and IV parameters. Only the default (unsupported)
 * case of the algo switch is visible in this extract.
 */
145 ccp_configure_session_aead(struct ccp_session *sess,
146 const struct rte_crypto_sym_xform *xform)
148 const struct rte_crypto_aead_xform *aead_xform = NULL;
150 aead_xform = &xform->aead;
152 sess->cipher.key_length = aead_xform->key.length;
153 rte_memcpy(sess->cipher.key, aead_xform->key.data,
154 aead_xform->key.length);
/* AEAD encrypt generates the tag; decrypt verifies it. */
156 if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
157 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
158 sess->auth.op = CCP_AUTH_OP_GENERATE;
160 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
161 sess->auth.op = CCP_AUTH_OP_VERIFY;
163 sess->auth.aad_length = aead_xform->aad_length;
164 sess->auth.digest_length = aead_xform->digest_length;
166 /* set iv parameters */
167 sess->iv.offset = aead_xform->iv.offset;
168 sess->iv.length = aead_xform->iv.length;
170 switch (aead_xform->algo) {
172 CCP_LOG_ERR("Unsupported aead algo");
/*
 * Top-level session setup: classify the xform chain via ccp_get_cmd_id(),
 * pick which xform(s) feed the cipher/auth/aead configuration helpers,
 * and run each applicable helper, logging on failure.
 * NOTE(review): interior lines (several case labels, break statements and
 * the error-path returns) are missing from this extract.
 */
179 ccp_set_session_parameters(struct ccp_session *sess,
180 const struct rte_crypto_sym_xform *xform)
182 const struct rte_crypto_sym_xform *cipher_xform = NULL;
183 const struct rte_crypto_sym_xform *auth_xform = NULL;
184 const struct rte_crypto_sym_xform *aead_xform = NULL;
187 sess->cmd_id = ccp_get_cmd_id(xform);
/* Select which xform in the chain plays which role. */
189 switch (sess->cmd_id) {
191 cipher_xform = xform;
196 case CCP_CMD_CIPHER_HASH:
197 cipher_xform = xform;
198 auth_xform = xform->next;
200 case CCP_CMD_HASH_CIPHER:
202 cipher_xform = xform->next;
204 case CCP_CMD_COMBINED:
208 CCP_LOG_ERR("Unsupported cmd_id");
212 /* Default IV length = 0 */
/* Configure each stage that this command ordering needs. */
215 ret = ccp_configure_session_cipher(sess, cipher_xform);
217 CCP_LOG_ERR("Invalid/unsupported cipher parameters");
222 ret = ccp_configure_session_auth(sess, auth_xform);
224 CCP_LOG_ERR("Invalid/unsupported auth parameters");
229 ret = ccp_configure_session_aead(sess, aead_xform);
231 CCP_LOG_ERR("Invalid/unsupported aead parameters");
238 /* calculate CCP descriptors requirement */
/*
 * Number of CCP queue descriptors one cipher op consumes:
 * CBC/CTR need an extra passthrough descriptor to load the IV;
 * (the ECB count and the return statements are missing lines here).
 */
240 ccp_cipher_slot(struct ccp_session *session)
244 switch (session->cipher.algo) {
245 case CCP_CIPHER_ALGO_AES_CBC:
247 /**< op + passthrough for iv */
249 case CCP_CIPHER_ALGO_AES_ECB:
253 case CCP_CIPHER_ALGO_AES_CTR:
255 /**< op + passthrough for iv */
258 CCP_LOG_ERR("Unsupported cipher algo %d",
259 session->cipher.algo);
/*
 * Descriptor count for one auth op. Only the default (unsupported)
 * case is visible in this extract; per-algorithm counts are missing.
 */
265 ccp_auth_slot(struct ccp_session *session)
269 switch (session->auth.algo) {
271 CCP_LOG_ERR("Unsupported auth algo %d",
/*
 * Descriptor count for one AEAD op. Only the default (unsupported)
 * case is visible in this extract; per-algorithm counts are missing.
 */
279 ccp_aead_slot(struct ccp_session *session)
283 switch (session->aead_algo) {
285 CCP_LOG_ERR("Unsupported aead algo %d",
/*
 * Total descriptor slots one op needs, per command ordering:
 * cipher-only, auth-only, cipher+auth chains (sum of both), or the
 * combined AEAD path. Break/return lines are missing from this extract.
 */
292 ccp_compute_slot_count(struct ccp_session *session)
296 switch (session->cmd_id) {
298 count = ccp_cipher_slot(session);
301 count = ccp_auth_slot(session);
303 case CCP_CMD_CIPHER_HASH:
304 case CCP_CMD_HASH_CIPHER:
/* Chained ops need both the cipher and the auth descriptors. */
305 count = ccp_cipher_slot(session);
306 count += ccp_auth_slot(session);
308 case CCP_CMD_COMBINED:
309 count = ccp_aead_slot(session);
312 CCP_LOG_ERR("Unsupported cmd_id");
/*
 * Fill the next descriptor in cmd_q with a PASSTHRU operation described
 * by *pst (length, byte-swap and bitwise modes, source and destination).
 * One branch programs system memory -> SB (local storage block); the
 * other SB -> system memory. NOTE(review): the condition selecting
 * between the two branches is on a line missing from this extract —
 * presumably a direction flag in *pst; confirm against the full source.
 * Advances cmd_q->qidx with wraparound.
 */
320 ccp_perform_passthru(struct ccp_passthru *pst,
321 struct ccp_queue *cmd_q)
323 struct ccp_desc *desc;
324 union ccp_function function;
326 desc = &cmd_q->qbase_desc[cmd_q->qidx];
328 CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
330 CCP_CMD_SOC(desc) = 0;
331 CCP_CMD_IOC(desc) = 0;
332 CCP_CMD_INIT(desc) = 0;
333 CCP_CMD_EOM(desc) = 0;
334 CCP_CMD_PROT(desc) = 0;
/* Encode byte-swap / bitwise options into the function word. */
337 CCP_PT_BYTESWAP(&function) = pst->byte_swap;
338 CCP_PT_BITWISE(&function) = pst->bit_mod;
339 CCP_CMD_FUNCTION(desc) = function.raw;
341 CCP_CMD_LEN(desc) = pst->len;
/* Branch 1: copy from system memory into the queue's SB slot. */
344 CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
345 CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
346 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
348 CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
349 CCP_CMD_DST_HI(desc) = 0;
350 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
/* Bitwise ops additionally reference the queue's key SB slot. */
352 if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
353 CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
/* Branch 2: copy from the SB slot back out to system memory. */
356 CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
357 CCP_CMD_SRC_HI(desc) = 0;
358 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
360 CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
361 CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
362 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
/* Consume the slot; the ring index wraps at COMMANDS_PER_QUEUE. */
365 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
/*
 * Build the descriptor(s) for one AES crypto op on cmd_q.
 * For non-ECB modes a PASSTHRU descriptor first stages the IV into the
 * queue's SB IV slot (CTR combines the session nonce with the per-op IV;
 * other modes stage the IV via the batch's lsb_buf scratch area), then
 * the AES descriptor itself is filled with src/dst/key addresses.
 * Marks the op NOT_PROCESSED; completion is resolved at dequeue time.
 */
369 ccp_perform_aes(struct rte_crypto_op *op,
370 struct ccp_queue *cmd_q,
371 struct ccp_batch_info *b_info)
373 struct ccp_session *session;
374 union ccp_function function;
376 struct ccp_passthru pst = {0};
377 struct ccp_desc *desc;
378 phys_addr_t src_addr, dest_addr, key_addr;
381 session = (struct ccp_session *)get_session_private_data(
383 ccp_cryptodev_driver_id);
/* Per-op IV lives in the op's private area at the session's offset. */
386 iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
387 if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
388 if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
/* CTR: append the IV after the session nonce and stage that buffer. */
389 rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
390 iv, session->iv.length);
391 pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
392 CCP_AES_SIZE(&function) = 0x1F;
/* Non-CTR: stage the IV through the batch's per-op lsb scratch slot. */
395 &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
397 (CCP_SB_BYTES - session->iv.length),
398 iv, session->iv.length);
399 pst.src_addr = b_info->lsb_buf_phys +
400 (b_info->lsb_buf_idx * CCP_SB_BYTES);
401 b_info->lsb_buf_idx++;
/* Passthrough the staged IV into the queue's SB IV slot. */
404 pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
405 pst.len = CCP_SB_BYTES;
407 pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
408 pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
409 ccp_perform_passthru(&pst, cmd_q);
412 desc = &cmd_q->qbase_desc[cmd_q->qidx];
/* In-place operation when no distinct destination mbuf is given. */
414 src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
415 op->sym->cipher.data.offset);
416 if (likely(op->sym->m_dst != NULL))
417 dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
418 op->sym->cipher.data.offset);
420 dest_addr = src_addr;
421 key_addr = session->cipher.key_phys;
423 /* prepare desc for aes command */
424 CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
425 CCP_CMD_INIT(desc) = 1;
426 CCP_CMD_EOM(desc) = 1;
428 CCP_AES_ENCRYPT(&function) = session->cipher.dir;
429 CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
430 CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
431 CCP_CMD_FUNCTION(desc) = function.raw;
433 CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
435 CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
436 CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
437 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
439 CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
440 CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
441 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
443 CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
444 CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
445 CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
/* Non-ECB modes reference the IV staged in the SB slot above. */
447 if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
448 CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
450 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
451 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
/*
 * Dispatch a cipher op to the AES descriptor builder and account for
 * the descriptors consumed: 2 for CBC/CTR (IV passthrough + AES op),
 * 1 for ECB (no IV). Break/return lines are missing from this extract.
 */
456 ccp_crypto_cipher(struct rte_crypto_op *op,
457 struct ccp_queue *cmd_q,
458 struct ccp_batch_info *b_info)
461 struct ccp_session *session;
463 session = (struct ccp_session *)get_session_private_data(
465 ccp_cryptodev_driver_id);
467 switch (session->cipher.algo) {
468 case CCP_CIPHER_ALGO_AES_CBC:
469 result = ccp_perform_aes(op, cmd_q, b_info);
470 b_info->desccnt += 2;
472 case CCP_CIPHER_ALGO_AES_CTR:
473 result = ccp_perform_aes(op, cmd_q, b_info);
474 b_info->desccnt += 2;
476 case CCP_CIPHER_ALGO_AES_ECB:
477 result = ccp_perform_aes(op, cmd_q, b_info);
478 b_info->desccnt += 1;
481 CCP_LOG_ERR("Unsupported cipher algo %d",
482 session->cipher.algo);
/*
 * Dispatch an auth op by session algorithm. Only the default
 * (unsupported) case is visible in this extract; cmd_q/b_info are
 * marked __rte_unused because the visible path never touches them.
 */
489 ccp_crypto_auth(struct rte_crypto_op *op,
490 struct ccp_queue *cmd_q __rte_unused,
491 struct ccp_batch_info *b_info __rte_unused)
495 struct ccp_session *session;
497 session = (struct ccp_session *)get_session_private_data(
499 ccp_cryptodev_driver_id);
501 switch (session->auth.algo) {
503 CCP_LOG_ERR("Unsupported auth algo %d",
/*
 * Dispatch an AEAD op by session algorithm. Only the default
 * (unsupported) case is visible in this extract; cmd_q/b_info are
 * marked __rte_unused because the visible path never touches them.
 */
512 ccp_crypto_aead(struct rte_crypto_op *op,
513 struct ccp_queue *cmd_q __rte_unused,
514 struct ccp_batch_info *b_info __rte_unused)
517 struct ccp_session *session;
519 session = (struct ccp_session *)get_session_private_data(
521 ccp_cryptodev_driver_id);
523 switch (session->aead_algo) {
525 CCP_LOG_ERR("Unsupported aead algo %d",
/*
 * Enqueue a burst of crypto ops onto a CCP hardware queue.
 * Allocates a batch-info record from the qp mempool, reserves queue
 * slots, builds descriptors for each op via the per-command dispatchers,
 * records head/tail offsets for completion polling, rings the doorbell
 * (tail write + RUN), and pushes the batch onto the processed ring.
 * On a per-op build failure the unused reserved slots are returned.
 * NOTE(review): several lines (loop variables, break statements, the
 * return value) are missing from this extract.
 */
533 process_ops_to_enqueue(const struct ccp_qp *qp,
534 struct rte_crypto_op **op,
535 struct ccp_queue *cmd_q,
540 struct ccp_batch_info *b_info;
541 struct ccp_session *session;
543 if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
544 CCP_LOG_ERR("batch info allocation failed");
547 /* populate batch info necessary for dequeue */
549 b_info->lsb_buf_idx = 0;
551 b_info->cmd_q = cmd_q;
552 b_info->lsb_buf_phys =
553 (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
/* Reserve the hardware slots this burst was sized for. */
554 rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
556 b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
/* Build descriptors for each op according to its session command. */
558 for (i = 0; i < nb_ops; i++) {
559 session = (struct ccp_session *)get_session_private_data(
561 ccp_cryptodev_driver_id);
562 switch (session->cmd_id) {
564 result = ccp_crypto_cipher(op[i], cmd_q, b_info);
567 result = ccp_crypto_auth(op[i], cmd_q, b_info);
569 case CCP_CMD_CIPHER_HASH:
570 result = ccp_crypto_cipher(op[i], cmd_q, b_info);
573 result = ccp_crypto_auth(op[i], cmd_q, b_info);
575 case CCP_CMD_HASH_CIPHER:
576 result = ccp_crypto_auth(op[i], cmd_q, b_info);
579 result = ccp_crypto_cipher(op[i], cmd_q, b_info);
581 case CCP_CMD_COMBINED:
582 result = ccp_crypto_aead(op[i], cmd_q, b_info);
585 CCP_LOG_ERR("Unsupported cmd_id");
588 if (unlikely(result < 0)) {
/* Give back the reserved slots we did not actually consume. */
589 rte_atomic64_add(&b_info->cmd_q->free_slots,
590 (slots_req - b_info->desccnt));
593 b_info->op[i] = op[i];
597 b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
601 /* Write the new tail address back to the queue register */
602 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
603 b_info->tail_offset);
604 /* Turn the queue back on using our cached control register */
605 CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
606 cmd_q->qcontrol | CMD_Q_RUN);
/* Hand the batch to the dequeue side for completion processing. */
608 rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
/*
 * Post-process an auth/AEAD op at dequeue time.
 * The engine wrote its context (including the digest) into a reserved
 * area at the tail of the last m_src segment (session->auth.ctx_len
 * bytes). Locate the digest there, byte-reverse it for SHA variants
 * other than SHA-1/224/256 (which the hardware emits in the wrong byte
 * order), then either verify against the expected digest (VERIFY) or
 * copy it out to the op's digest pointer (GENERATE), and finally trim
 * the reserved area off the mbuf.
 * NOTE(review): some lines (digest_le usage after the reversal loop,
 * closing braces) are missing from this extract.
 */
613 static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
615 struct ccp_session *session;
616 uint8_t *digest_data, *addr;
617 struct rte_mbuf *m_last;
618 int offset, digest_offset;
619 uint8_t digest_le[64];
621 session = (struct ccp_session *)get_session_private_data(
623 ccp_cryptodev_driver_id);
/* AEAD and plain-auth ops carry their digest fields in different places. */
625 if (session->cmd_id == CCP_CMD_COMBINED) {
626 digest_data = op->sym->aead.digest.data;
627 digest_offset = op->sym->aead.data.offset +
628 op->sym->aead.data.length;
630 digest_data = op->sym->auth.digest.data;
631 digest_offset = op->sym->auth.data.offset +
632 op->sym->auth.data.length;
/* Context area sits at the very end of the last segment's data. */
634 m_last = rte_pktmbuf_lastseg(op->sym->m_src);
635 addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
636 m_last->data_len - session->auth.ctx_len);
639 offset = session->auth.offset;
641 if (session->auth.engine == CCP_ENGINE_SHA)
642 if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
643 (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
644 (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
645 /* All other algorithms require byte
/* Reverse the digest bytes into digest_le. */
650 offset = session->auth.ctx_len -
651 session->auth.offset - 1;
652 for (i = 0; i < session->auth.digest_length; i++)
653 digest_le[i] = addr[offset - i];
658 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
659 if (session->auth.op == CCP_AUTH_OP_VERIFY) {
660 if (memcmp(addr + offset, digest_data,
661 session->auth.digest_length) != 0)
662 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* GENERATE: fall back to m_dst when no digest pointer was supplied. */
665 if (unlikely(digest_data == 0))
666 digest_data = rte_pktmbuf_mtod_offset(
667 op->sym->m_dst, uint8_t *,
669 rte_memcpy(digest_data, addr + offset,
670 session->auth.digest_length);
672 /* Trim area used for digest from mbuf. */
673 rte_pktmbuf_trim(op->sym->m_src,
674 session->auth.ctx_len);
/*
 * Move up to nb_ops completed ops from a batch into the caller's array,
 * finalizing each: cipher-only ops are marked SUCCESS directly, while
 * auth / chained / AEAD ops go through ccp_auth_dq_prepare() for digest
 * verification or copy-out. Decrements the batch's outstanding op count;
 * the return statement is on a line missing from this extract
 * (presumably returns the number of ops prepared — confirm upstream).
 */
678 ccp_prepare_ops(struct rte_crypto_op **op_d,
679 struct ccp_batch_info *b_info,
683 struct ccp_session *session;
685 min_ops = RTE_MIN(nb_ops, b_info->opcnt);
687 for (i = 0; i < min_ops; i++) {
688 op_d[i] = b_info->op[b_info->op_idx++];
689 session = (struct ccp_session *)get_session_private_data(
690 op_d[i]->sym->session,
691 ccp_cryptodev_driver_id);
692 switch (session->cmd_id) {
694 op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
697 case CCP_CMD_CIPHER_HASH:
698 case CCP_CMD_HASH_CIPHER:
699 case CCP_CMD_COMBINED:
/* Any op with an auth component needs digest post-processing. */
700 ccp_auth_dq_prepare(op_d[i]);
703 CCP_LOG_ERR("Unsupported cmd_id");
707 b_info->opcnt -= min_ops;
712 process_ops_to_dequeue(struct ccp_qp *qp,
713 struct rte_crypto_op **op,
716 struct ccp_batch_info *b_info;
717 uint32_t cur_head_offset;
719 if (qp->b_info != NULL) {
721 if (unlikely(b_info->op_idx > 0))
723 } else if (rte_ring_dequeue(qp->processed_pkts,
726 cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
729 if (b_info->head_offset < b_info->tail_offset) {
730 if ((cur_head_offset >= b_info->head_offset) &&
731 (cur_head_offset < b_info->tail_offset)) {
736 if ((cur_head_offset >= b_info->head_offset) ||
737 (cur_head_offset < b_info->tail_offset)) {
745 nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
746 rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
748 if (b_info->opcnt > 0) {
751 rte_mempool_put(qp->batch_mp, (void *)b_info);