/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <string.h>
#include <sys/queue.h>
#include <sys/types.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_cryptodev_pmd.h>

#include "ccp_dev.h"
#include "ccp_crypto.h"
#include "ccp_pmd_private.h"
static enum ccp_cmd_order
ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;

	if (xform == NULL)
		return res;
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return CCP_CMD_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return CCP_CMD_HASH_CIPHER;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return CCP_CMD_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return CCP_CMD_CIPHER_HASH;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		return CCP_CMD_COMBINED;
	return res;
}
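
/*
 * Usage sketch (illustrative only, not called by the driver): a cipher
 * xform chained to an auth xform resolves to CCP_CMD_CIPHER_HASH.
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH, .next = NULL };
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER, .next = &auth };
 *	ccp_get_cmd_id(&cipher) == CCP_CMD_CIPHER_HASH;
 */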
/* configure session */
static int
ccp_configure_session_cipher(struct ccp_session *sess,
			     const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	size_t i, j, x;

	cipher_xform = &xform->cipher;

	/* set cipher direction */
	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
	else
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;

	/* set cipher key */
	sess->cipher.key_length = cipher_xform->key.length;
	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
		   cipher_xform->key.length);

	/* set iv parameters */
	sess->iv.offset = cipher_xform->iv.offset;
	sess->iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_AES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
		sess->cipher.engine = CCP_ENGINE_3DES;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo");
		return -1;
	}

	/* set key type and stage the key in CCP (reversed) byte order */
	switch (sess->cipher.engine) {
	case CCP_ENGINE_AES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
		else if (sess->cipher.key_length == 32)
			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		for (i = 0; i < sess->cipher.key_length; i++)
			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
				sess->cipher.key[i];
		break;
	case CCP_ENGINE_3DES:
		if (sess->cipher.key_length == 16)
			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
		else if (sess->cipher.key_length == 24)
			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
		else {
			CCP_LOG_ERR("Invalid cipher key length");
			return -1;
		}
		/* reverse key bytes within each 8-byte DES sub-key */
		for (j = 0, x = 0; j < sess->cipher.key_length / 8; j++, x += 8)
			for (i = 0; i < 8; i++)
				sess->cipher.key_ccp[(8 + x) - i - 1] =
					sess->cipher.key[i + x];
		break;
	default:
		CCP_LOG_ERR("Invalid CCP Engine");
		return -ENOTSUP;
	}
	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
	return 0;
}
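
/*
 * Key layout example (illustrative): the CCP engine consumes keys in
 * reversed byte order. For a 16-byte AES key k[0]..k[15], the loop above
 * stores key_ccp[15] = k[0], key_ccp[14] = k[1], ..., key_ccp[0] = k[15].
 * For 3DES the reversal is done per 8-byte DES sub-key instead.
 */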
static int
ccp_configure_session_auth(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_auth_xform *auth_xform = NULL;

	auth_xform = &xform->auth;

	sess->auth.digest_length = auth_xform->digest_length;
	if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	else
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	switch (auth_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported hash algo");
		return -ENOTSUP;
	}
	return 0;
}

static int
ccp_configure_session_aead(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_aead_xform *aead_xform = NULL;

	aead_xform = &xform->aead;

	sess->cipher.key_length = aead_xform->key.length;
	rte_memcpy(sess->cipher.key, aead_xform->key.data,
		   aead_xform->key.length);

	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	} else {
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	}
	sess->auth.aad_length = aead_xform->aad_length;
	sess->auth.digest_length = aead_xform->digest_length;

	/* set iv parameters */
	sess->iv.offset = aead_xform->iv.offset;
	sess->iv.length = aead_xform->iv.length;

	switch (aead_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo");
		return -ENOTSUP;
	}
	return 0;
}
int
ccp_set_session_parameters(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret = 0;

	sess->cmd_id = ccp_get_cmd_id(xform);

	switch (sess->cmd_id) {
	case CCP_CMD_CIPHER:
		cipher_xform = xform;
		break;
	case CCP_CMD_AUTH:
		auth_xform = xform;
		break;
	case CCP_CMD_CIPHER_HASH:
		cipher_xform = xform;
		auth_xform = xform->next;
		break;
	case CCP_CMD_HASH_CIPHER:
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case CCP_CMD_COMBINED:
		aead_xform = xform;
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
		return -1;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	if (cipher_xform) {
		ret = ccp_configure_session_cipher(sess, cipher_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
			return ret;
		}
	}
	if (auth_xform) {
		ret = ccp_configure_session_auth(sess, auth_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported auth parameters");
			return ret;
		}
	}
	if (aead_xform) {
		ret = ccp_configure_session_aead(sess, aead_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported aead parameters");
			return ret;
		}
	}
	return ret;
}
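
/*
 * Call-flow sketch (illustrative): the PMD session-configure op feeds a
 * validated xform chain straight into ccp_set_session_parameters(); a
 * non-zero return means the algo/key/IV combination was rejected:
 *
 *	if (ccp_set_session_parameters(sess, xform) != 0)
 *		return -EINVAL;
 */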
/* calculate CCP descriptors requirement */
static int
ccp_cipher_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		count = 1;
		/**< only op */
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		count = 2;
		/**< op + passthrough for iv */
		break;
	case CCP_CIPHER_ALGO_3DES_CBC:
		count = 2;
		/**< op + passthrough for iv */
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
	}
	return count;
}

static int
ccp_auth_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
	}
	return count;
}

static int
ccp_aead_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
	}
	return count;
}

int
ccp_compute_slot_count(struct ccp_session *session)
{
	int count = 0;

	switch (session->cmd_id) {
	case CCP_CMD_CIPHER:
		count = ccp_cipher_slot(session);
		break;
	case CCP_CMD_AUTH:
		count = ccp_auth_slot(session);
		break;
	case CCP_CMD_CIPHER_HASH:
	case CCP_CMD_HASH_CIPHER:
		count = ccp_cipher_slot(session);
		count += ccp_auth_slot(session);
		break;
	case CCP_CMD_COMBINED:
		count = ccp_aead_slot(session);
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
	}
	return count;
}
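
/*
 * Worked example (illustrative): an AES-CBC cipher-only session needs
 * ccp_cipher_slot() == 2 descriptors per op (IV passthrough + AES op),
 * so a queue with 16 free slots can accept at most 8 such ops.
 */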
static void
ccp_perform_passthru(struct ccp_passthru *pst,
		     struct ccp_queue *cmd_q)
{
	struct ccp_desc *desc;
	union ccp_function function;

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 0;
	CCP_CMD_EOM(desc) = 0;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
	CCP_PT_BITWISE(&function) = pst->bit_mod;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = pst->len;

	if (pst->dir) {
		/* copy from system memory into an LSB slot */
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = 0;
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;

		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
	} else {
		/* copy from an LSB slot back to system memory */
		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
		CCP_CMD_SRC_HI(desc) = 0;
		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;

		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
	}

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
}
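
/*
 * Illustrative sketch (not part of the driver flow; helper name is
 * hypothetical): loading a per-op IV into the queue's reserved LSB slot
 * is a "dir = 1" passthru with no bitwise op and a 256-bit byte swap,
 * exactly the pattern the AES and 3DES paths below use.
 */
static inline void
ccp_load_iv_sketch(struct ccp_queue *cmd_q, phys_addr_t iv_phys)
{
	struct ccp_passthru pst = {0};

	pst.src_addr = iv_phys;		/* IV staged in DMA-able memory */
	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
	pst.len = CCP_SB_BYTES;
	pst.dir = 1;			/* system memory -> LSB slot */
	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
	ccp_perform_passthru(&pst, cmd_q);
}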
static int
ccp_perform_aes(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q,
		struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	uint8_t *lsb_buf;
	struct ccp_passthru pst = {0};
	struct ccp_desc *desc;
	phys_addr_t src_addr, dest_addr, key_addr;
	uint8_t *iv;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);
	function.raw = 0;

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
				   iv, session->iv.length);
			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
			CCP_AES_SIZE(&function) = 0x1F;
		} else {
			lsb_buf =
			&(b_info->lsb_buf[b_info->lsb_buf_idx * CCP_SB_BYTES]);
			rte_memcpy(lsb_buf +
				   (CCP_SB_BYTES - session->iv.length),
				   iv, session->iv.length);
			pst.src_addr = b_info->lsb_buf_phys +
				(b_info->lsb_buf_idx * CCP_SB_BYTES);
			b_info->lsb_buf_idx++;
		}

		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
	}

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (likely(op->sym->m_dst != NULL))
		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;
	key_addr = session->cipher.key_phys;

	/* prepare desc for aes command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;

	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
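
/*
 * Descriptor accounting note (illustrative): CBC and CTR consume two
 * queue slots per op (the IV passthru above plus the AES op itself),
 * while ECB skips the passthru and consumes one. This must stay in step
 * with ccp_cipher_slot() and the desccnt increments in
 * ccp_crypto_cipher() below.
 */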
static int
ccp_perform_3des(struct rte_crypto_op *op,
		 struct ccp_queue *cmd_q,
		 struct ccp_batch_info *b_info)
{
	struct ccp_session *session;
	union ccp_function function;
	unsigned char *lsb_buf;
	struct ccp_passthru pst;
	struct ccp_desc *desc;
	uint32_t tail;
	uint8_t *iv;
	phys_addr_t src_addr, dest_addr, key_addr;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
	switch (session->cipher.um.des_mode) {
	case CCP_DES_MODE_CBC:
		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx * CCP_SB_BYTES]);
		b_info->lsb_buf_idx++;

		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
			   iv, session->iv.length);

		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)lsb_buf);
		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
		pst.len = CCP_SB_BYTES;
		pst.dir = 1;
		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
		ccp_perform_passthru(&pst, cmd_q);
		break;
	case CCP_DES_MODE_CFB:
	case CCP_DES_MODE_ECB:
		CCP_LOG_ERR("Unsupported DES cipher mode");
		return -ENOTSUP;
	}

	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
					      op->sym->cipher.data.offset);
	if (unlikely(op->sym->m_dst != NULL))
		dest_addr =
			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						   op->sym->cipher.data.offset);
	else
		dest_addr = src_addr;

	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);

	desc = &cmd_q->qbase_desc[cmd_q->qidx];

	memset(desc, 0, Q_DESC_SIZE);

	/* prepare desc for des command */
	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;

	CCP_CMD_SOC(desc) = 0;
	CCP_CMD_IOC(desc) = 0;
	CCP_CMD_INIT(desc) = 1;
	CCP_CMD_EOM(desc) = 1;
	CCP_CMD_PROT(desc) = 0;

	function.raw = 0;
	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
	CCP_CMD_FUNCTION(desc) = function.raw;

	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;

	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;

	if (session->cipher.um.des_mode)
		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;

	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

	rte_wmb();

	/* Write the new tail address back to the queue register */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	return 0;
}
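
/*
 * Doorbell sketch (illustrative, hypothetical helper name): committing
 * newly written descriptors to hardware is always the same sequence, as
 * used above and in process_ops_to_enqueue(): barrier, write the new
 * tail, then re-assert RUN from the cached control word.
 */
static inline void
ccp_queue_doorbell_sketch(struct ccp_queue *cmd_q)
{
	uint32_t tail;

	rte_wmb();	/* descriptors must hit memory before the doorbell */
	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);
}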
static inline int
ccp_crypto_cipher(struct rte_crypto_op *op,
		  struct ccp_queue *cmd_q,
		  struct ccp_batch_info *b_info)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->cipher.algo) {
	case CCP_CIPHER_ALGO_AES_CBC:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_CTR:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	case CCP_CIPHER_ALGO_AES_ECB:
		result = ccp_perform_aes(op, cmd_q, b_info);
		b_info->desccnt += 1;
		break;
	case CCP_CIPHER_ALGO_3DES_CBC:
		result = ccp_perform_3des(op, cmd_q, b_info);
		b_info->desccnt += 2;
		break;
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
		return -ENOTSUP;
	}
	return result;
}

static inline int
ccp_crypto_auth(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
		return -ENOTSUP;
	}
	return result;
}

static inline int
ccp_crypto_aead(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
		return -ENOTSUP;
	}
	return result;
}
int
process_ops_to_enqueue(const struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       struct ccp_queue *cmd_q,
		       uint16_t nb_ops,
		       int slots_req)
{
	int i, result = 0;
	struct ccp_batch_info *b_info;
	struct ccp_session *session;

	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
		CCP_LOG_ERR("batch info allocation failed");
		return 0;
	}
	/* populate batch info necessary for dequeue */
	b_info->op_idx = 0;
	b_info->lsb_buf_idx = 0;
	b_info->desccnt = 0;
	b_info->cmd_q = cmd_q;
	b_info->lsb_buf_phys =
		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);

	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);
	for (i = 0; i < nb_ops; i++) {
		session = (struct ccp_session *)get_session_private_data(
						 op[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_AUTH:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_CIPHER_HASH:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_HASH_CIPHER:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_COMBINED:
			result = ccp_crypto_aead(op[i], cmd_q, b_info);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
			result = -1;
		}
		if (unlikely(result < 0)) {
			rte_atomic64_add(&b_info->cmd_q->free_slots,
					 (slots_req - b_info->desccnt));
			break;
		}
		b_info->op[i] = op[i];
	}

	b_info->opcnt = i;
	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);

	rte_wmb();
	/* Write the new tail address back to the queue register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
		      b_info->tail_offset);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);

	return i;
}
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
	} else {
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	}
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);

	rte_mb();
	offset = session->auth.offset;

	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			unsigned int i;

			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
			offset = 0;
			addr = digest_le;
		}

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		if (unlikely(digest_data == NULL))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
					digest_offset);
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
	}
	/* Trim area used for digest from mbuf. */
	rte_pktmbuf_trim(op->sym->m_src,
			 session->auth.ctx_len);
}
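
/*
 * Byte-swap example (illustrative): for digests other than SHA-1/224/256
 * the engine leaves the result in reversed byte order, so the loop above
 * copies it back-to-front. E.g. a 4-byte digest laid out in the mbuf as
 * {d3, d2, d1, d0} ends up in digest_le as {d0, d1, d2, d3}.
 */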
static int
ccp_prepare_ops(struct rte_crypto_op **op_d,
		struct ccp_batch_info *b_info,
		uint16_t nb_ops)
{
	int i, min_ops;
	struct ccp_session *session;

	min_ops = RTE_MIN(nb_ops, b_info->opcnt);

	for (i = 0; i < min_ops; i++) {
		op_d[i] = b_info->op[b_info->op_idx++];
		session = (struct ccp_session *)get_session_private_data(
						 op_d[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case CCP_CMD_AUTH:
		case CCP_CMD_CIPHER_HASH:
		case CCP_CMD_HASH_CIPHER:
		case CCP_CMD_COMBINED:
			ccp_auth_dq_prepare(op_d[i]);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
		}
	}

	b_info->opcnt -= min_ops;
	return min_ops;
}
int
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       uint16_t nb_ops)
{
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;

	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		if (unlikely(b_info->op_idx > 0))
			goto success;
	} else if (rte_ring_dequeue(qp->processed_pkts,
				    (void **)&b_info))
		return 0;
	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);

	if (b_info->head_offset < b_info->tail_offset) {
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	} else {
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	}

success:
	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	if (b_info->opcnt > 0) {
		qp->b_info = b_info;
	} else {
		rte_mempool_put(qp->batch_mp, (void *)b_info);
		qp->b_info = NULL;
	}
	return nb_ops;
}
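
/*
 * Wrap-around check, restated (illustrative, hypothetical helper): the
 * batch above is still in flight while the hardware head pointer lies
 * inside [head_offset, tail_offset), modulo the ring wrapping past the
 * end of the descriptor area.
 */
static inline int
ccp_batch_in_flight_sketch(uint32_t head, uint32_t tail, uint32_t cur)
{
	if (head < tail)
		return cur >= head && cur < tail;	/* no wrap */
	return cur >= head || cur < tail;		/* wrapped */
}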