/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */
#include <string.h>
#include <sys/queue.h>
#include <sys/types.h>

#include <openssl/cmac.h> /*sub key apis*/
#include <openssl/evp.h> /*sub key apis*/

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_cryptodev_pmd.h>
#include "ccp_dev.h"
#include "ccp_crypto.h"
#include "ccp_pmd_private.h"
static enum ccp_cmd_order
ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;

	if (xform == NULL)
		return res;
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return CCP_CMD_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return CCP_CMD_HASH_CIPHER;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return CCP_CMD_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return CCP_CMD_CIPHER_HASH;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		return CCP_CMD_COMBINED;
	return res;
}
/* prepare temporary keys K1 and K2 */
static void prepare_key(unsigned char *k, unsigned char *l, int bl)
{
	int i;

	/* Shift block to left, including carry */
	for (i = 0; i < bl; i++) {
		k[i] = l[i] << 1;
		if (i < bl - 1 && l[i + 1] & 0x80)
			k[i] |= 0x1;
	}

	/* If MSB set fixup with R */
	if (l[0] & 0x80)
		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
}
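/*
 * Note: prepare_key() is the subkey-doubling step from NIST SP 800-38B /
 * RFC 4493: K1 = dbl(E_K(0^128)) and K2 = dbl(K1), where dbl() is a one-bit
 * left shift in GF(2^128) with the reduction constant 0x87 folded into the
 * last byte when the dropped MSB was set (0x1b for 64-bit block ciphers).
 */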
/* subkeys K1 and K2 generation for CMAC */
static int
generate_cmac_subkeys(struct ccp_session *sess)
{
	const EVP_CIPHER *algo;
	EVP_CIPHER_CTX *ctx;
	unsigned char *ccp_ctx;
	size_t i;
	int dstlen, totlen;
	unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
	unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
	unsigned char k1[AES_BLOCK_SIZE] = {0};
	unsigned char k2[AES_BLOCK_SIZE] = {0};
	if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
		algo = EVP_aes_128_cbc();
	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
		algo = EVP_aes_192_cbc();
	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
		algo = EVP_aes_256_cbc();
	else {
		CCP_LOG_ERR("Invalid CMAC type length");
		return -1;
	}

	ctx = EVP_CIPHER_CTX_new();
	if (!ctx) {
		CCP_LOG_ERR("ctx creation failed");
		return -1;
	}

	if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
			    (unsigned char *)zero_iv) <= 0)
		goto key_generate_err;
	if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
		goto key_generate_err;
	if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
			      AES_BLOCK_SIZE) <= 0)
		goto key_generate_err;
	if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
		goto key_generate_err;
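	/*
	 * dst now holds L = AES-Encrypt(key, 0^128), from which K1 and K2
	 * are derived.  Below, the two subkeys are stored into
	 * auth.pre_compute as two CCP_SB_BYTES-sized storage-block images:
	 * K1 in the first slot, K2 in the second, each byte-reversed into
	 * the upper half of its 32-byte slot so the lower 16 bytes can
	 * carry the running IV when the slot is loaded by
	 * ccp_perform_aes_cmac().
	 */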
	memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);

	ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
	prepare_key(k1, dst, AES_BLOCK_SIZE);
	for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
		*ccp_ctx = k1[i];

	ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
				    (2 * CCP_SB_BYTES) - 1);
	prepare_key(k2, k1, AES_BLOCK_SIZE);
	for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
		*ccp_ctx = k2[i];

	EVP_CIPHER_CTX_free(ctx);

	return 0;

key_generate_err:
	EVP_CIPHER_CTX_free(ctx);
	CCP_LOG_ERR("CMAC Init failed");
	return -1;
}
/* configure session */
static int
ccp_configure_session_cipher(struct ccp_session *sess,
			     const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	size_t i, j, x;

	cipher_xform = &xform->cipher;

	/* set cipher direction */
	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
	else
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;

	/* set cipher key */
	sess->cipher.key_length = cipher_xform->key.length;
	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
		   cipher_xform->key.length);

	/* set iv parameters */
	sess->iv.offset = cipher_xform->iv.offset;
	sess->iv.length = cipher_xform->iv.length;
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_3DES;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo");
		return -1;
	}
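	/*
	 * Stage the cipher key in the layout the CCP engine consumes from
	 * system memory: AES keys are stored fully byte-reversed into
	 * cipher.key_ccp, while 3DES keys are byte-reversed within each
	 * 8-byte subkey.  key_ccp (not cipher.key) is what the command
	 * descriptors reference via cipher.key_phys.
	 */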
	switch (sess->cipher.engine) {
	case CCP_ENGINE_AES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
		else if (sess->cipher.key_length == 32)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		for (i = 0; i < sess->cipher.key_length ; i++)
			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
				sess->cipher.key[i];
		break;
	case CCP_ENGINE_3DES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
			for (i = 0; i < 8; i++)
				sess->cipher.key_ccp[(8 + x) - i - 1] =
					sess->cipher.key[i + x];
		break;
	default:
		CCP_LOG_ERR("Invalid CCP Engine");
		return -1;
	}
	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
	return 0;
}
static int
ccp_configure_session_auth(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_auth_xform *auth_xform = NULL;
	size_t i;

	auth_xform = &xform->auth;

	sess->auth.digest_length = auth_xform->digest_length;
	if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	else
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_AES_CMAC:
		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
		sess->auth.engine = CCP_ENGINE_AES;
		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
		sess->auth.key_length = auth_xform->key.length;
		/* scratch area: padding and hash result */
		sess->auth.ctx_len = CCP_SB_BYTES << 1;
		sess->auth.offset = AES_BLOCK_SIZE;
		sess->auth.block_size = AES_BLOCK_SIZE;
		if (sess->auth.key_length == 16)
			sess->auth.ut.aes_type = CCP_AES_TYPE_128;
		else if (sess->auth.key_length == 24)
			sess->auth.ut.aes_type = CCP_AES_TYPE_192;
		else if (sess->auth.key_length == 32)
			sess->auth.ut.aes_type = CCP_AES_TYPE_256;
		else {
			CCP_LOG_ERR("Invalid CMAC key length");
			return -1;
		}
		rte_memcpy(sess->auth.key, auth_xform->key.data,
			   sess->auth.key_length);
		for (i = 0; i < sess->auth.key_length; i++)
			sess->auth.key_ccp[sess->auth.key_length - i - 1] =
				sess->auth.key[i];
		if (generate_cmac_subkeys(sess))
			return -1;
		break;
	default:
		CCP_LOG_ERR("Unsupported hash algo");
		return -1;
	}
	return 0;
}
static int
ccp_configure_session_aead(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_aead_xform *aead_xform = NULL;

	aead_xform = &xform->aead;

	sess->cipher.key_length = aead_xform->key.length;
	rte_memcpy(sess->cipher.key, aead_xform->key.data,
		   aead_xform->key.length);

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	} else {
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	}
	sess->auth.aad_length = aead_xform->aad_length;
	sess->auth.digest_length = aead_xform->digest_length;

	/* set iv parameters */
	sess->iv.offset = aead_xform->iv.offset;
	sess->iv.length = aead_xform->iv.length;

	switch (aead_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo");
		return -1;
	}
	return 0;
}
int
ccp_set_session_parameters(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret = 0;

	sess->cmd_id = ccp_get_cmd_id(xform);

	switch (sess->cmd_id) {
	case CCP_CMD_CIPHER:
		cipher_xform = xform;
		break;
	case CCP_CMD_AUTH:
		auth_xform = xform;
		break;
	case CCP_CMD_CIPHER_HASH:
		cipher_xform = xform;
		auth_xform = xform->next;
		break;
	case CCP_CMD_HASH_CIPHER:
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case CCP_CMD_COMBINED:
		aead_xform = xform;
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
		return -1;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	if (cipher_xform) {
		ret = ccp_configure_session_cipher(sess, cipher_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
			return ret;
		}
	}
	if (auth_xform) {
		ret = ccp_configure_session_auth(sess, auth_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported auth parameters");
			return ret;
		}
	}
	if (aead_xform) {
		ret = ccp_configure_session_aead(sess, aead_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported aead parameters");
			return ret;
		}
	}
	return ret;
}
/* calculate CCP descriptors requirement */
static int
ccp_cipher_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		count = 1;
		/**< only op */
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_3DES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
	}
	return count;
}
static int
ccp_auth_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->auth.algo) {
	case CCP_AUTH_ALGO_AES_CMAC:
		count = 4;
		/**< op + passthrough for k1/k2 and iv,
		 * extra descriptor in padding case
		 * (k1/k2(255:128) with iv(127:0)),
		 * plus passthrough to retrieve the result
		 */
		break;
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
	}
	return count;
}
static int
ccp_aead_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
	}
	return count;
}
int
ccp_compute_slot_count(struct ccp_session *session)
{
	int count = 0;

	switch (session->cmd_id) {
	case CCP_CMD_CIPHER:
		count = ccp_cipher_slot(session);
		break;
	case CCP_CMD_AUTH:
		count = ccp_auth_slot(session);
		break;
	case CCP_CMD_CIPHER_HASH:
	case CCP_CMD_HASH_CIPHER:
		count = ccp_cipher_slot(session);
		count += ccp_auth_slot(session);
		break;
	case CCP_CMD_COMBINED:
		count = ccp_aead_slot(session);
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
	}

	return count;
}
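/*
 * The per-session descriptor counts above are worst-case figures.  The
 * enqueue path is expected to sum them over a burst (the slots_req argument
 * of process_ops_to_enqueue()) and debit cmd_q->free_slots before any
 * command descriptors are written; the dequeue path credits free_slots back
 * once the hardware head pointer has moved past the batch.
 */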
static void
ccp_perform_passthru(struct ccp_passthru *pst,
		     struct ccp_queue *cmd_q)
{
	struct ccp_desc *desc;
	union ccp_function function;

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 0;
	CCP_CMD_EOM(desc) = 0;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
	CCP_PT_BITWISE(&function) = pst->bit_mod;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = pst->len;

	if (pst->dir) {
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = 0;
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;

		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
	} else {
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = 0;
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	}

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
}
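/*
 * ccp_perform_passthru() queues a PASSTHRU descriptor that copies pst->len
 * bytes between system memory and the queue's local storage block (LSB):
 * callers set pst->dir non-zero to load an IV/key image into an LSB slot
 * before an AES/3DES command, and zero to read a result back out,
 * optionally applying the 256-bit byte swap.  It only writes the
 * descriptor; the tail register (doorbell) is rung later by the caller.
 */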
static int
ccp_perform_aes_cmac(struct rte_crypto_op *op,
		     struct ccp_queue *cmd_q)
{
	struct ccp_session *session;
	union ccp_function function;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	uint32_t tail;
	uint8_t *src_tb, *append_ptr, *ctx_addr;
	phys_addr_t src_addr, dest_addr, key_addr;
	int length, non_align_len;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);
	key_addr = rte_mem_virt2phy(session->auth.key_ccp);

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->auth.data.offset);
	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
						   session->auth.ctx_len);
	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);

	function.raw = 0;
	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
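	/*
	 * Two cases below: if the authenticated data is a whole number of
	 * AES blocks, the final block is masked with K1, so the K1 half of
	 * auth.pre_compute (with a zero IV in its low 16 bytes) is loaded
	 * into the queue's sb_iv slot and the data is processed in a single
	 * command.  Otherwise the K2 half is loaded, the block-aligned part
	 * is processed first, and a padded final block
	 * (data || CMAC_PAD_VALUE || zeros) staged in the appended scratch
	 * area is processed by a second command.
	 */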
	if (op->sym->auth.data.length % session->auth.block_size == 0) {
		ctx_addr = session->auth.pre_compute;
		memset(ctx_addr, 0, AES_BLOCK_SIZE);
		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_perform_passthru(&pst, cmd_q);

		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);

		/* prepare desc for aes-cmac command */
		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_EOM(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;

		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

		rte_wmb();

		tail =
		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol | CMD_Q_RUN);
	} else {
		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
		memset(ctx_addr, 0, AES_BLOCK_SIZE);
		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_perform_passthru(&pst, cmd_q);

		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
		length *= AES_BLOCK_SIZE;
		non_align_len = op->sym->auth.data.length - length;
		/* prepare desc for aes-cmac command */
		/* first command: the block-aligned part of the data */
		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);

		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_INIT(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;

		CCP_CMD_LEN(desc) = length;
		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

		/* second command: the padded final block, staged in the
		 * scratch area appended to the mbuf
		 */
		append_ptr = append_ptr + CCP_SB_BYTES;
		memset(append_ptr, 0, AES_BLOCK_SIZE);
		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
						 uint8_t *,
						 op->sym->auth.data.offset +
						 length);
		rte_memcpy(append_ptr, src_tb, non_align_len);
		append_ptr[non_align_len] = CMAC_PAD_VALUE;

		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);

		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_EOM(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;
		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;

		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

		rte_wmb();

		tail =
		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol | CMD_Q_RUN);
	}
	/* Retrieve result */
	pst.dest_addr = dest_addr;
	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
	pst.len = CCP_SB_BYTES;
	pst.dir = 0;
	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
	ccp_perform_passthru(&pst, cmd_q);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
static int
ccp_perform_aes(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	uint8_t *lsb_buf;
	struct ccp_passthru pst = {0};
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr;
	uint8_t *iv;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);
	function.raw = 0;
	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
				   iv, session->iv.length);
			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
			/* counter-size field of the AES function */
			CCP_AES_SIZE(&function) = 0x1F;
		} else {
			lsb_buf =
			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
			rte_memcpy(lsb_buf +
				   (CCP_SB_BYTES - session->iv.length),
				   iv, session->iv.length);
			pst.src_addr = b_info->lsb_buf_phys +
				(b_info->lsb_buf_idx * CCP_SB_BYTES);
			b_info->lsb_buf_idx++;
		}

		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	}
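	/*
	 * For CTR mode the per-op IV is copied in behind the session nonce
	 * and the whole nonce block is loaded from cipher.nonce_phys; for
	 * CBC the IV is right-aligned into this batch's next lsb_buf entry.
	 * Either way the 32-byte image is byte-swapped into the queue's
	 * sb_iv LSB slot, which the AES descriptor below references through
	 * CCP_CMD_LSB_ID().  ECB needs no IV and skips the pass-through.
	 */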
	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (likely(op->sym->m_dst != NULL))
		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;
	key_addr = session->cipher.key_phys;

	/* prepare desc for aes command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;

	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
static int
ccp_perform_3des(struct rte_crypto_op *op,
		 struct ccp_queue *cmd_q,
		 struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	unsigned char *lsb_buf;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	uint32_t tail;
	uint8_t *iv;
	phys_addr_t src_addr, dest_addr, key_addr;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	switch (session->cipher.um.des_mode) {
	case CCP_DES_MODE_CBC:
		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
		b_info->lsb_buf_idx++;

		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
			   iv, session->iv.length);

		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
		break;
	case CCP_DES_MODE_CFB:
	case CCP_DES_MODE_ECB:
		CCP_LOG_ERR("Unsupported DES cipher mode");
		return -1;
	}
	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (unlikely(op->sym->m_dst != NULL))
		dest_addr =
			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						   op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;

	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	memset(desc, 0, Q_DESC_SIZE);

	/* prepare desc for des command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.des_mode)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

	rte_wmb();

	/* Write the new tail address back to the queue register */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
static inline int
ccp_crypto_cipher(struct rte_crypto_op *op,
		  struct ccp_queue *cmd_q,
		  struct ccp_batch_info *b_info)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 1;
		break;
	case CCP_CIPHER_ALGO_3DES_CBC:
		result = ccp_perform_3des(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
		return -1;
	}
	return result;
}
static inline int
ccp_crypto_auth(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->auth.algo) {
	case CCP_AUTH_ALGO_AES_CMAC:
		result = ccp_perform_aes_cmac(op, cmd_q);
		b_info->desccnt += 4;
		break;
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
		return -1;
	}
	return result;
}
static inline int
ccp_crypto_aead(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
		return -1;
	}
	return result;
}
int
process_ops_to_enqueue(const struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       struct ccp_queue *cmd_q,
		       uint16_t nb_ops,
		       int slots_req)
{
	int i, result = 0;
	struct ccp_batch_info *b_info;
	struct ccp_session *session;

	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
		CCP_LOG_ERR("batch info allocation failed");
		return 0;
	}
	/* populate batch info necessary for dequeue */
	b_info->op_idx = 0;
	b_info->lsb_buf_idx = 0;
	b_info->desccnt = 0;
	b_info->cmd_q = cmd_q;
	b_info->lsb_buf_phys =
		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);

	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);
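	/*
	 * head_offset records where this batch begins in the descriptor
	 * ring (as a bus-address offset); together with tail_offset, set
	 * after the loop below, it lets process_ops_to_dequeue() compare
	 * the hardware head pointer against the batch's span to decide
	 * whether the batch has completed.
	 */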
	for (i = 0; i < nb_ops; i++) {
		session = (struct ccp_session *)get_session_private_data(
						 op[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_AUTH:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_CIPHER_HASH:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_HASH_CIPHER:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_COMBINED:
			result = ccp_crypto_aead(op[i], cmd_q, b_info);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
			result = -1;
		}
		if (unlikely(result < 0)) {
			rte_atomic64_add(&b_info->cmd_q->free_slots,
					 (slots_req - b_info->desccnt));
			break;
		}
		b_info->op[i] = op[i];
	}

	b_info->opcnt = i;
	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);

	rte_wmb();
	/* Write the new tail address back to the queue register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
		      b_info->tail_offset);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);

	return i;
}
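/*
 * Illustrative sketch only (not part of the driver): how a burst enqueue
 * function is expected to use ccp_compute_slot_count() and
 * process_ops_to_enqueue().  The function name and the simple free-slot
 * check are hypothetical; the real PMD burst path may differ.
 */
#ifdef CCP_CRYPTO_USAGE_SKETCH
static uint16_t
example_enqueue_burst(struct ccp_qp *qp, struct ccp_queue *cmd_q,
		      struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct ccp_session *sess;
	int slots_req = 0;
	uint16_t i;

	/* Worst-case descriptor requirement for the whole burst */
	for (i = 0; i < nb_ops; i++) {
		sess = (struct ccp_session *)get_session_private_data(
				ops[i]->sym->session,
				ccp_cryptodev_driver_id);
		slots_req += ccp_compute_slot_count(sess);
	}
	/* Refuse the burst if the descriptor ring cannot hold it */
	if (slots_req > rte_atomic64_read(&cmd_q->free_slots))
		return 0;
	/* Build and submit the descriptors; returns the ops accepted */
	return (uint16_t)process_ops_to_enqueue(qp, ops, cmd_q,
						nb_ops, slots_req);
}
#endif /* CCP_CRYPTO_USAGE_SKETCH */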
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
	} else {
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	}
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);
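	/*
	 * addr now points at the ctx_len scratch area that the enqueue path
	 * appended to the source mbuf; the engine's result was written back
	 * into that area (via the final pass-through), and the scratch is
	 * trimmed off again at the end of this function.
	 */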
	offset = session->auth.offset;

	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			unsigned int i;

			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
			offset = 0;
			addr = digest_le;
		}
	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		if (unlikely(digest_data == 0))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
					digest_offset);
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
	}
	/* Trim area used for digest from mbuf. */
	rte_pktmbuf_trim(op->sym->m_src,
			 session->auth.ctx_len);
}
static int
ccp_prepare_ops(struct rte_crypto_op **op_d,
		struct ccp_batch_info *b_info,
		uint16_t nb_ops)
{
	int i, min_ops;
	struct ccp_session *session;

	min_ops = RTE_MIN(nb_ops, b_info->opcnt);

	for (i = 0; i < min_ops; i++) {
		op_d[i] = b_info->op[b_info->op_idx++];
		session = (struct ccp_session *)get_session_private_data(
						 op_d[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case CCP_CMD_AUTH:
		case CCP_CMD_CIPHER_HASH:
		case CCP_CMD_HASH_CIPHER:
		case CCP_CMD_COMBINED:
			ccp_auth_dq_prepare(op_d[i]);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
		}
	}

	b_info->opcnt -= min_ops;
	return min_ops;
}
int
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       uint16_t nb_ops)
{
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;

	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		if (unlikely(b_info->op_idx > 0))
			goto success;
	} else if (rte_ring_dequeue(qp->processed_pkts,
				    (void **)&b_info))
		return 0;

	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);
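	/*
	 * The descriptor ring is circular, so the region occupied by this
	 * batch may wrap past the end of the ring.  The batch is still in
	 * flight while the hardware head pointer lies inside
	 * [head_offset, tail_offset); the second branch below handles the
	 * case where that interval itself wraps around.  If the batch is
	 * unfinished, remember it in qp->b_info and report no completions
	 * yet.
	 */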
	if (b_info->head_offset < b_info->tail_offset) {
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	} else {
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	}
success:
	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	if (b_info->opcnt > 0) {
		qp->b_info = b_info;
	} else {
		rte_mempool_put(qp->batch_mp, (void *)b_info);
		qp->b_info = NULL;
	}

	return nb_ops;
}