1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
10 #include <sys/queue.h>
11 #include <sys/types.h>
13 #include <openssl/cmac.h> /*sub key apis*/
14 #include <openssl/evp.h> /*sub key apis*/
16 #include <rte_hexdump.h>
17 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_memory.h>
20 #include <rte_spinlock.h>
21 #include <rte_string_fns.h>
22 #include <rte_cryptodev_pmd.h>
25 #include "ccp_crypto.h"
27 #include "ccp_pmd_private.h"
29 static enum ccp_cmd_order
30 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
32 enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
36 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
37 if (xform->next == NULL)
39 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
40 return CCP_CMD_HASH_CIPHER;
42 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
43 if (xform->next == NULL)
44 return CCP_CMD_CIPHER;
45 else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
46 return CCP_CMD_CIPHER_HASH;
48 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
49 return CCP_CMD_COMBINED;
/*
 * prepare temporary keys K1 and K2 (CMAC subkey derivation, NIST SP
 * 800-38B): k = (l << 1), and if the MSB of l was set, XOR the final
 * byte with the field constant R (0x87 for 16-byte blocks, 0x1b for 8).
 *
 * @param k  output subkey, bl bytes
 * @param l  input block (big-endian bit order), bl bytes
 * @param bl block length in bytes (16 for AES, 8 for DES-sized blocks)
 */
static void prepare_key(unsigned char *k, unsigned char *l, int bl)
{
	int i;

	/* Shift block to left, including carry */
	for (i = 0; i < bl; i++) {
		k[i] = l[i] << 1;
		/* carry the MSB of the next byte into this byte's LSB */
		if (i < bl - 1 && l[i + 1] & 0x80)
			k[i] |= 0x1;
	}
	/* If MSB set fixup with R */
	if (l[0] & 0x80)
		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
}
68 /* subkeys K1 and K2 generation for CMAC */
70 generate_cmac_subkeys(struct ccp_session *sess)
72 const EVP_CIPHER *algo;
74 unsigned char *ccp_ctx;
77 unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
78 unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
79 unsigned char k1[AES_BLOCK_SIZE] = {0};
80 unsigned char k2[AES_BLOCK_SIZE] = {0};
82 if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
83 algo = EVP_aes_128_cbc();
84 else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
85 algo = EVP_aes_192_cbc();
86 else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
87 algo = EVP_aes_256_cbc();
89 CCP_LOG_ERR("Invalid CMAC type length");
93 ctx = EVP_CIPHER_CTX_new();
95 CCP_LOG_ERR("ctx creation failed");
98 if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
99 (unsigned char *)zero_iv) <= 0)
100 goto key_generate_err;
101 if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
102 goto key_generate_err;
103 if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
104 AES_BLOCK_SIZE) <= 0)
105 goto key_generate_err;
106 if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
107 goto key_generate_err;
109 memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
111 ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
112 prepare_key(k1, dst, AES_BLOCK_SIZE);
113 for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
116 ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
117 (2 * CCP_SB_BYTES) - 1);
118 prepare_key(k2, k1, AES_BLOCK_SIZE);
119 for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
122 EVP_CIPHER_CTX_free(ctx);
127 CCP_LOG_ERR("CMAC Init failed");
131 /* configure session */
133 ccp_configure_session_cipher(struct ccp_session *sess,
134 const struct rte_crypto_sym_xform *xform)
136 const struct rte_crypto_cipher_xform *cipher_xform = NULL;
139 cipher_xform = &xform->cipher;
141 /* set cipher direction */
142 if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
143 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
145 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
148 sess->cipher.key_length = cipher_xform->key.length;
149 rte_memcpy(sess->cipher.key, cipher_xform->key.data,
150 cipher_xform->key.length);
152 /* set iv parameters */
153 sess->iv.offset = cipher_xform->iv.offset;
154 sess->iv.length = cipher_xform->iv.length;
156 switch (cipher_xform->algo) {
157 case RTE_CRYPTO_CIPHER_AES_CTR:
158 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
159 sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
160 sess->cipher.engine = CCP_ENGINE_AES;
162 case RTE_CRYPTO_CIPHER_AES_ECB:
163 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
164 sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
165 sess->cipher.engine = CCP_ENGINE_AES;
167 case RTE_CRYPTO_CIPHER_AES_CBC:
168 sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
169 sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
170 sess->cipher.engine = CCP_ENGINE_AES;
172 case RTE_CRYPTO_CIPHER_3DES_CBC:
173 sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
174 sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
175 sess->cipher.engine = CCP_ENGINE_3DES;
178 CCP_LOG_ERR("Unsupported cipher algo");
183 switch (sess->cipher.engine) {
185 if (sess->cipher.key_length == 16)
186 sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
187 else if (sess->cipher.key_length == 24)
188 sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
189 else if (sess->cipher.key_length == 32)
190 sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
192 CCP_LOG_ERR("Invalid cipher key length");
195 for (i = 0; i < sess->cipher.key_length ; i++)
196 sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
199 case CCP_ENGINE_3DES:
200 if (sess->cipher.key_length == 16)
201 sess->cipher.ut.des_type = CCP_DES_TYPE_128;
202 else if (sess->cipher.key_length == 24)
203 sess->cipher.ut.des_type = CCP_DES_TYPE_192;
205 CCP_LOG_ERR("Invalid cipher key length");
208 for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
209 for (i = 0; i < 8; i++)
210 sess->cipher.key_ccp[(8 + x) - i - 1] =
211 sess->cipher.key[i + x];
214 CCP_LOG_ERR("Invalid CCP Engine");
217 sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
218 sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
223 ccp_configure_session_auth(struct ccp_session *sess,
224 const struct rte_crypto_sym_xform *xform)
226 const struct rte_crypto_auth_xform *auth_xform = NULL;
229 auth_xform = &xform->auth;
231 sess->auth.digest_length = auth_xform->digest_length;
232 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
233 sess->auth.op = CCP_AUTH_OP_GENERATE;
235 sess->auth.op = CCP_AUTH_OP_VERIFY;
236 switch (auth_xform->algo) {
237 case RTE_CRYPTO_AUTH_AES_CMAC:
238 sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
239 sess->auth.engine = CCP_ENGINE_AES;
240 sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
241 sess->auth.key_length = auth_xform->key.length;
242 /**<padding and hash result*/
243 sess->auth.ctx_len = CCP_SB_BYTES << 1;
244 sess->auth.offset = AES_BLOCK_SIZE;
245 sess->auth.block_size = AES_BLOCK_SIZE;
246 if (sess->auth.key_length == 16)
247 sess->auth.ut.aes_type = CCP_AES_TYPE_128;
248 else if (sess->auth.key_length == 24)
249 sess->auth.ut.aes_type = CCP_AES_TYPE_192;
250 else if (sess->auth.key_length == 32)
251 sess->auth.ut.aes_type = CCP_AES_TYPE_256;
253 CCP_LOG_ERR("Invalid CMAC key length");
256 rte_memcpy(sess->auth.key, auth_xform->key.data,
257 sess->auth.key_length);
258 for (i = 0; i < sess->auth.key_length; i++)
259 sess->auth.key_ccp[sess->auth.key_length - i - 1] =
261 if (generate_cmac_subkeys(sess))
265 CCP_LOG_ERR("Unsupported hash algo");
272 ccp_configure_session_aead(struct ccp_session *sess,
273 const struct rte_crypto_sym_xform *xform)
275 const struct rte_crypto_aead_xform *aead_xform = NULL;
278 aead_xform = &xform->aead;
280 sess->cipher.key_length = aead_xform->key.length;
281 rte_memcpy(sess->cipher.key, aead_xform->key.data,
282 aead_xform->key.length);
284 if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
285 sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
286 sess->auth.op = CCP_AUTH_OP_GENERATE;
288 sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
289 sess->auth.op = CCP_AUTH_OP_VERIFY;
291 sess->aead_algo = aead_xform->algo;
292 sess->auth.aad_length = aead_xform->aad_length;
293 sess->auth.digest_length = aead_xform->digest_length;
295 /* set iv parameters */
296 sess->iv.offset = aead_xform->iv.offset;
297 sess->iv.length = aead_xform->iv.length;
299 switch (aead_xform->algo) {
300 case RTE_CRYPTO_AEAD_AES_GCM:
301 sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
302 sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
303 sess->cipher.engine = CCP_ENGINE_AES;
304 if (sess->cipher.key_length == 16)
305 sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
306 else if (sess->cipher.key_length == 24)
307 sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
308 else if (sess->cipher.key_length == 32)
309 sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
311 CCP_LOG_ERR("Invalid aead key length");
314 for (i = 0; i < sess->cipher.key_length; i++)
315 sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
317 sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
318 sess->auth.engine = CCP_ENGINE_AES;
319 sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
320 sess->auth.ctx_len = CCP_SB_BYTES;
321 sess->auth.offset = 0;
322 sess->auth.block_size = AES_BLOCK_SIZE;
323 sess->cmd_id = CCP_CMD_COMBINED;
326 CCP_LOG_ERR("Unsupported aead algo");
329 sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
330 sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
335 ccp_set_session_parameters(struct ccp_session *sess,
336 const struct rte_crypto_sym_xform *xform)
338 const struct rte_crypto_sym_xform *cipher_xform = NULL;
339 const struct rte_crypto_sym_xform *auth_xform = NULL;
340 const struct rte_crypto_sym_xform *aead_xform = NULL;
343 sess->cmd_id = ccp_get_cmd_id(xform);
345 switch (sess->cmd_id) {
347 cipher_xform = xform;
352 case CCP_CMD_CIPHER_HASH:
353 cipher_xform = xform;
354 auth_xform = xform->next;
356 case CCP_CMD_HASH_CIPHER:
358 cipher_xform = xform->next;
360 case CCP_CMD_COMBINED:
364 CCP_LOG_ERR("Unsupported cmd_id");
368 /* Default IV length = 0 */
371 ret = ccp_configure_session_cipher(sess, cipher_xform);
373 CCP_LOG_ERR("Invalid/unsupported cipher parameters");
378 ret = ccp_configure_session_auth(sess, auth_xform);
380 CCP_LOG_ERR("Invalid/unsupported auth parameters");
385 ret = ccp_configure_session_aead(sess, aead_xform);
387 CCP_LOG_ERR("Invalid/unsupported aead parameters");
394 /* calculate CCP descriptors requirement */
396 ccp_cipher_slot(struct ccp_session *session)
400 switch (session->cipher.algo) {
401 case CCP_CIPHER_ALGO_AES_CBC:
403 /**< op + passthrough for iv */
405 case CCP_CIPHER_ALGO_AES_ECB:
409 case CCP_CIPHER_ALGO_AES_CTR:
411 /**< op + passthrough for iv */
413 case CCP_CIPHER_ALGO_3DES_CBC:
415 /**< op + passthrough for iv */
418 CCP_LOG_ERR("Unsupported cipher algo %d",
419 session->cipher.algo);
425 ccp_auth_slot(struct ccp_session *session)
429 switch (session->auth.algo) {
430 case CCP_AUTH_ALGO_AES_CMAC:
434 * extra descriptor in padding case
435 * (k1/k2(255:128) with iv(127:0))
440 CCP_LOG_ERR("Unsupported auth algo %d",
448 ccp_aead_slot(struct ccp_session *session)
452 switch (session->aead_algo) {
453 case RTE_CRYPTO_AEAD_AES_GCM:
456 CCP_LOG_ERR("Unsupported aead algo %d",
459 switch (session->auth.algo) {
460 case CCP_AUTH_ALGO_AES_GCM:
471 CCP_LOG_ERR("Unsupported combined auth ALGO %d",
478 ccp_compute_slot_count(struct ccp_session *session)
482 switch (session->cmd_id) {
484 count = ccp_cipher_slot(session);
487 count = ccp_auth_slot(session);
489 case CCP_CMD_CIPHER_HASH:
490 case CCP_CMD_HASH_CIPHER:
491 count = ccp_cipher_slot(session);
492 count += ccp_auth_slot(session);
494 case CCP_CMD_COMBINED:
495 count = ccp_aead_slot(session);
498 CCP_LOG_ERR("Unsupported cmd_id");
506 ccp_perform_passthru(struct ccp_passthru *pst,
507 struct ccp_queue *cmd_q)
509 struct ccp_desc *desc;
510 union ccp_function function;
512 desc = &cmd_q->qbase_desc[cmd_q->qidx];
514 CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
516 CCP_CMD_SOC(desc) = 0;
517 CCP_CMD_IOC(desc) = 0;
518 CCP_CMD_INIT(desc) = 0;
519 CCP_CMD_EOM(desc) = 0;
520 CCP_CMD_PROT(desc) = 0;
523 CCP_PT_BYTESWAP(&function) = pst->byte_swap;
524 CCP_PT_BITWISE(&function) = pst->bit_mod;
525 CCP_CMD_FUNCTION(desc) = function.raw;
527 CCP_CMD_LEN(desc) = pst->len;
530 CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
531 CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
532 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
534 CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
535 CCP_CMD_DST_HI(desc) = 0;
536 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
538 if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
539 CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
542 CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
543 CCP_CMD_SRC_HI(desc) = 0;
544 CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
546 CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
547 CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
548 CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
551 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
/*
 * Enqueue the descriptor sequence computing an AES-CMAC digest over the
 * op's auth region. Aligned data takes the fast path (single AES op with
 * a zero IV); unaligned data runs the aligned prefix, then pads the tail
 * with CMAC_PAD_VALUE and finishes with a final AES op, before copying
 * the result back from the LSB slot via passthrough.
 *
 * NOTE(review): this copy appears truncated — the return-type line,
 * several braces/locals and the else branch marker are missing; compare
 * against upstream dpdk ccp_crypto.c before relying on structure.
 */
ccp_perform_aes_cmac(struct rte_crypto_op *op,
		     struct ccp_queue *cmd_q)
	struct ccp_session *session;
	union ccp_function function;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	uint8_t *src_tb, *append_ptr, *ctx_addr;
	phys_addr_t src_addr, dest_addr, key_addr;
	int length, non_align_len;
	session = (struct ccp_session *)get_session_private_data(
					 ccp_cryptodev_driver_id);
	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->auth.data.offset);
	/* grow m_src by ctx_len; the digest is staged in this tail area */
	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
					session->auth.ctx_len);
	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
	if (op->sym->auth.data.length % session->auth.block_size == 0) {
		/* block-aligned: start from a zeroed IV slot */
		ctx_addr = session->auth.pre_compute;
		memset(ctx_addr, 0, AES_BLOCK_SIZE);
		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_perform_passthru(&pst, cmd_q);
		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);
		/* prepare desc for aes-cmac command */
		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_EOM(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;
		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
		/* ring doorbell: advance tail and re-run the queue */
		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol | CMD_Q_RUN);
		/* unaligned path (else branch): load zero IV, run aligned
		 * prefix, then the padded final block
		 */
		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
		memset(ctx_addr, 0, AES_BLOCK_SIZE);
		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_perform_passthru(&pst, cmd_q);
		/* split into block-aligned prefix + trailing remainder */
		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
		length *= AES_BLOCK_SIZE;
		non_align_len = op->sym->auth.data.length - length;
		/* prepare desc for aes-cmac command */
		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);
		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_INIT(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;
		CCP_CMD_LEN(desc) = length;
		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
		/* stage the padded final block in the appended tail area */
		append_ptr = append_ptr + CCP_SB_BYTES;
		memset(append_ptr, 0, AES_BLOCK_SIZE);
		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
						 op->sym->auth.data.offset +
		rte_memcpy(append_ptr, src_tb, non_align_len);
		append_ptr[non_align_len] = CMAC_PAD_VALUE;
		desc = &cmd_q->qbase_desc[cmd_q->qidx];
		memset(desc, 0, Q_DESC_SIZE);
		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
		CCP_CMD_EOM(desc) = 1;
		CCP_CMD_FUNCTION(desc) = function.raw;
		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
		/* ring doorbell for the unaligned path */
		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol | CMD_Q_RUN);
	/* Retrieve result */
	pst.dest_addr = dest_addr;
	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
	pst.len = CCP_SB_BYTES;
	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
	ccp_perform_passthru(&pst, cmd_q);
	/* completion is detected later by polling the queue head */
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
/*
 * Enqueue the descriptor(s) for one AES cipher op (CBC/CTR/ECB).
 * Non-ECB modes first load the IV into the queue's LSB slot via a
 * passthrough descriptor (CTR uses the session nonce buffer; CBC stages
 * the IV in the batch's lsb_buf). Does not ring the doorbell; the caller
 * batches that.
 *
 * NOTE(review): this copy appears truncated — the return-type line, some
 * locals (e.g. iv), braces and an else marker are missing; compare
 * against upstream dpdk ccp_crypto.c before relying on structure.
 */
ccp_perform_aes(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
	struct ccp_session *session;
	union ccp_function function;
	struct ccp_passthru pst = {0};
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr;
	session = (struct ccp_session *)get_session_private_data(
					 ccp_cryptodev_driver_id);
	/* IV lives in the op's private area at the session's iv offset */
	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
			/* CTR: IV goes after the nonce in the session buf */
			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
				   iv, session->iv.length);
			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
			CCP_AES_SIZE(&function) = 0x1F;
			/* CBC path (else): stage IV in batch lsb_buf slot */
			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
			(CCP_SB_BYTES - session->iv.length),
			iv, session->iv.length);
			pst.src_addr = b_info->lsb_buf_phys +
				(b_info->lsb_buf_idx * CCP_SB_BYTES);
			b_info->lsb_buf_idx++;
		/* copy staged IV into the queue's LSB slot */
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	desc = &cmd_q->qbase_desc[cmd_q->qidx];
	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	/* in-place operation when no separate destination mbuf is given */
	if (likely(op->sym->m_dst != NULL))
		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						op->sym->cipher.data.offset);
		dest_addr = src_addr;
	key_addr = session->cipher.key_phys;
	/* prepare desc for aes command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;
	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	CCP_CMD_FUNCTION(desc) = function.raw;
	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	/* only modes with an IV reference the LSB slot */
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
/*
 * Enqueue the descriptors for one 3DES-CBC cipher op: stage the IV in the
 * batch lsb_buf, passthrough it into the queue's LSB IV slot, then the
 * 3DES descriptor, and ring the doorbell immediately.
 *
 * NOTE(review): this copy appears truncated — the return-type line, some
 * locals (iv, tail), braces and an else marker are missing; compare
 * against upstream dpdk ccp_crypto.c before relying on structure.
 */
ccp_perform_3des(struct rte_crypto_op *op,
		 struct ccp_queue *cmd_q,
		 struct ccp_batch_info *b_info)
	struct ccp_session *session;
	union ccp_function function;
	unsigned char *lsb_buf;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr;
	session = (struct ccp_session *)get_session_private_data(
					 ccp_cryptodev_driver_id);
	/* IV lives in the op's private area at the session's iv offset */
	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	switch (session->cipher.um.des_mode) {
	case CCP_DES_MODE_CBC:
		/* stage IV in this batch's lsb_buf slot, right-aligned */
		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
		b_info->lsb_buf_idx++;
		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
			   iv, session->iv.length);
		/* copy staged IV into the queue's LSB IV slot */
		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	case CCP_DES_MODE_CFB:
	case CCP_DES_MODE_ECB:
		CCP_LOG_ERR("Unsupported DES cipher mode");
	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	/* in-place operation when no separate destination mbuf is given */
	if (unlikely(op->sym->m_dst != NULL))
		rte_pktmbuf_mtophys_offset(op->sym->m_dst,
					   op->sym->cipher.data.offset);
		dest_addr = src_addr;
	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
	desc = &cmd_q->qbase_desc[cmd_q->qidx];
	memset(desc, 0, Q_DESC_SIZE);
	/* prepare desc for des command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;
	CCP_CMD_PROT(desc) = 0;
	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
	CCP_CMD_FUNCTION(desc) = function.raw;
	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	/* nonzero des_mode means an IV is in use (ECB is mode 0 here) */
	if (session->cipher.um.des_mode)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	/* Write the new tail address back to the queue register */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
/*
 * Enqueue the 5-descriptor AES-GCM sequence: (1) passthru IV/nonce to the
 * LSB slot, (2) GHASH over the AAD, (3) GCTR over the plaintext,
 * (4) passthru to reload the IV, (5) GHASH-final over the staged
 * (AAD_len || PT_len) block, writing the tag to the digest address.
 *
 * NOTE(review): this copy appears truncated — return-type line, locals
 * (iv, temp, tail), and some braces are missing; compare against
 * upstream dpdk ccp_crypto.c before relying on structure.
 */
ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
	struct ccp_session *session;
	union ccp_function function;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
	phys_addr_t digest_dest_addr;
	int length, non_align_len;
	session = (struct ccp_session *)get_session_private_data(
					 ccp_cryptodev_driver_id);
	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	key_addr = session->cipher.key_phys;
	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->aead.data.offset);
	/* in-place operation when no separate destination mbuf is given */
	if (unlikely(op->sym->m_dst != NULL))
		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						op->sym->aead.data.offset);
		dest_addr = src_addr;
	/* reserve tail space in m_src for the hash context */
	rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
	digest_dest_addr = op->sym->aead.digest.phys_addr;
	/* stage the GHASH length block (AAD bits || PT bits), big-endian */
	temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
	*temp++ = rte_bswap64(session->auth.aad_length << 3);
	*temp = rte_bswap64(op->sym->aead.data.length << 3);
	non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
	length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
	aad_addr = op->sym->aead.aad.phys_addr;
	/* CMD1 IV Passthru */
	rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
	pst.src_addr = session->cipher.nonce_phys;
	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
	pst.len = CCP_SB_BYTES;
	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
	ccp_perform_passthru(&pst, cmd_q);
	/* CMD2 : GHASH over the AAD */
	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	desc = &cmd_q->qbase_desc[cmd_q->qidx];
	memset(desc, 0, Q_DESC_SIZE);
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_FUNCTION(desc) = function.raw;
	CCP_CMD_LEN(desc) = session->auth.aad_length;
	CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	/* ring doorbell after the AAD hash */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);
	/* CMD3 : GCTR Plain text */
	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
	CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	/* SIZE field encodes valid bits of the final (partial) block */
	if (non_align_len == 0)
		CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
		CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
	desc = &cmd_q->qbase_desc[cmd_q->qidx];
	memset(desc, 0, Q_DESC_SIZE);
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_EOM(desc) = 1;
	CCP_CMD_FUNCTION(desc) = function.raw;
	CCP_CMD_LEN(desc) = length;
	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	/* NOTE(review): likely should be CCP_CMD_DST_MEM here — SRC_MEM is
	 * already set above and DST_MEM is left unset; verify vs upstream.
	 */
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	/* ring doorbell after the GCTR pass */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);
	/* CMD4 : PT to copy IV */
	pst.src_addr = session->cipher.nonce_phys;
	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
	pst.len = AES_BLOCK_SIZE;
	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
	ccp_perform_passthru(&pst, cmd_q);
	/* CMD5 : GHASH-Final */
	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	desc = &cmd_q->qbase_desc[cmd_q->qidx];
	memset(desc, 0, Q_DESC_SIZE);
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_FUNCTION(desc) = function.raw;
	/* Last block (AAD_len || PT_len)*/
	CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
	CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
	CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
	/* NOTE(review): same suspected SRC/DST copy-paste as CMD3 above */
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	/* ring doorbell for the final GHASH */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1070 ccp_crypto_cipher(struct rte_crypto_op *op,
1071 struct ccp_queue *cmd_q,
1072 struct ccp_batch_info *b_info)
1075 struct ccp_session *session;
1077 session = (struct ccp_session *)get_session_private_data(
1079 ccp_cryptodev_driver_id);
1081 switch (session->cipher.algo) {
1082 case CCP_CIPHER_ALGO_AES_CBC:
1083 result = ccp_perform_aes(op, cmd_q, b_info);
1084 b_info->desccnt += 2;
1086 case CCP_CIPHER_ALGO_AES_CTR:
1087 result = ccp_perform_aes(op, cmd_q, b_info);
1088 b_info->desccnt += 2;
1090 case CCP_CIPHER_ALGO_AES_ECB:
1091 result = ccp_perform_aes(op, cmd_q, b_info);
1092 b_info->desccnt += 1;
1094 case CCP_CIPHER_ALGO_3DES_CBC:
1095 result = ccp_perform_3des(op, cmd_q, b_info);
1096 b_info->desccnt += 2;
1099 CCP_LOG_ERR("Unsupported cipher algo %d",
1100 session->cipher.algo);
1107 ccp_crypto_auth(struct rte_crypto_op *op,
1108 struct ccp_queue *cmd_q,
1109 struct ccp_batch_info *b_info)
1113 struct ccp_session *session;
1115 session = (struct ccp_session *)get_session_private_data(
1117 ccp_cryptodev_driver_id);
1119 switch (session->auth.algo) {
1120 case CCP_AUTH_ALGO_AES_CMAC:
1121 result = ccp_perform_aes_cmac(op, cmd_q);
1122 b_info->desccnt += 4;
1125 CCP_LOG_ERR("Unsupported auth algo %d",
1126 session->auth.algo);
1134 ccp_crypto_aead(struct rte_crypto_op *op,
1135 struct ccp_queue *cmd_q,
1136 struct ccp_batch_info *b_info)
1139 struct ccp_session *session;
1141 session = (struct ccp_session *)get_session_private_data(
1143 ccp_cryptodev_driver_id);
1145 switch (session->auth.algo) {
1146 case CCP_AUTH_ALGO_AES_GCM:
1147 if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
1148 CCP_LOG_ERR("Incorrect chain order");
1151 result = ccp_perform_aes_gcm(op, cmd_q);
1152 b_info->desccnt += 5;
1155 CCP_LOG_ERR("Unsupported aead algo %d",
1156 session->aead_algo);
/*
 * Burst-enqueue ops onto a CCP queue: allocates a batch_info from the
 * qp's mempool, reserves slots_req descriptors, dispatches each op by
 * its session cmd_id, records head/tail offsets for completion polling,
 * rings the doorbell once, and pushes the batch onto qp->processed_pkts.
 *
 * NOTE(review): this copy appears truncated — the signature tail
 * (nb_ops/slots_req params), return-type line, several locals, braces,
 * breaks and case labels are missing; compare against upstream dpdk
 * ccp_crypto.c before relying on structure.
 */
process_ops_to_enqueue(const struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       struct ccp_queue *cmd_q,
	struct ccp_batch_info *b_info;
	struct ccp_session *session;
	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
		CCP_LOG_ERR("batch info allocation failed");
	/* populate batch info necessary for dequeue */
	b_info->lsb_buf_idx = 0;
	b_info->desccnt = 0;
	b_info->cmd_q = cmd_q;
	b_info->lsb_buf_phys =
		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
	/* reserve descriptor slots up front; refunded on failure below */
	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
	for (i = 0; i < nb_ops; i++) {
		session = (struct ccp_session *)get_session_private_data(
						op[i]->sym->session,
						ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			/* CCP_CMD_AUTH case (label truncated in this copy) */
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
		case CCP_CMD_CIPHER_HASH:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
		case CCP_CMD_HASH_CIPHER:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
		case CCP_CMD_COMBINED:
			result = ccp_crypto_aead(op[i], cmd_q, b_info);
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
		if (unlikely(result < 0)) {
			/* refund the slots we reserved but did not use */
			rte_atomic64_add(&b_info->cmd_q->free_slots,
					 (slots_req - b_info->desccnt));
		b_info->op[i] = op[i];
	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
	/* Write the new tail address back to the queue register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
		      b_info->tail_offset);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);
	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
/*
 * Post-completion digest handling for one op: locate the hardware result
 * in the mbuf tail area appended at enqueue time, byte-reverse it for
 * algorithms that need it, then either verify it against the supplied
 * digest (VERIFY) or copy it out (GENERATE), and trim the scratch tail.
 *
 * NOTE(review): this copy appears truncated — locals (i), braces and
 * parts of the SHA branch are missing; compare against upstream dpdk
 * ccp_crypto.c before relying on structure.
 */
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];
	session = (struct ccp_session *)get_session_private_data(
					 ccp_cryptodev_driver_id);
	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	/* hardware wrote the digest into the appended mbuf tail area */
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);
	offset = session->auth.offset;
	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		/* GENERATE path: copy the digest out to the caller */
		if (unlikely(digest_data == 0))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
		/* Trim area used for digest from mbuf. */
		rte_pktmbuf_trim(op->sym->m_src,
				 session->auth.ctx_len);
1308 ccp_prepare_ops(struct rte_crypto_op **op_d,
1309 struct ccp_batch_info *b_info,
1313 struct ccp_session *session;
1315 min_ops = RTE_MIN(nb_ops, b_info->opcnt);
1317 for (i = 0; i < min_ops; i++) {
1318 op_d[i] = b_info->op[b_info->op_idx++];
1319 session = (struct ccp_session *)get_session_private_data(
1320 op_d[i]->sym->session,
1321 ccp_cryptodev_driver_id);
1322 switch (session->cmd_id) {
1323 case CCP_CMD_CIPHER:
1324 op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1327 case CCP_CMD_CIPHER_HASH:
1328 case CCP_CMD_HASH_CIPHER:
1329 case CCP_CMD_COMBINED:
1330 ccp_auth_dq_prepare(op_d[i]);
1333 CCP_LOG_ERR("Unsupported cmd_id");
1337 b_info->opcnt -= min_ops;
/*
 * Burst-dequeue completed ops: resume a partially-drained batch if one is
 * pending on the qp, otherwise pull the next batch from processed_pkts;
 * poll the queue's HEAD register to decide whether the hardware has moved
 * past this batch's descriptor window (with wrap-around handling), then
 * hand completed ops to ccp_prepare_ops() and recycle or park the batch.
 *
 * NOTE(review): this definition runs past the end of the visible source
 * and several lines (return type, locals, braces) are missing — left
 * unmodified; compare against upstream dpdk ccp_crypto.c.
 */
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;
	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		if (unlikely(b_info->op_idx > 0))
	} else if (rte_ring_dequeue(qp->processed_pkts,
	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);
	if (b_info->head_offset < b_info->tail_offset) {
		/* no wrap: hardware still inside [head, tail) => not done */
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
		/* wrapped window */
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
	/* return this batch's descriptor slots to the queue */
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	if (b_info->opcnt > 0) {
		qp->b_info = b_info;
		rte_mempool_put(qp->batch_mp, (void *)b_info);