/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <string.h>
#include <sys/queue.h>
#include <sys/types.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_cryptodev_pmd.h>

#include "ccp_dev.h"
#include "ccp_crypto.h"
#include "ccp_pmd_private.h"

/* Map a symmetric crypto xform chain to the CCP command order. */
static enum ccp_cmd_order
ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;

	if (xform == NULL)
		return res;
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return CCP_CMD_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return CCP_CMD_HASH_CIPHER;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return CCP_CMD_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return CCP_CMD_CIPHER_HASH;
	}
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		return CCP_CMD_COMBINED;
	return res;
}
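
/*
 * Illustrative sketch (not part of the driver): a cipher xform whose
 * ->next points to an auth xform resolves to CCP_CMD_CIPHER_HASH, while
 * a lone AEAD xform resolves to CCP_CMD_COMBINED. For example:
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,
 *	};
 *
 *	ccp_get_cmd_id(&cipher_xf);	returns CCP_CMD_CIPHER_HASH
 *	ccp_get_cmd_id(&auth_xf);	returns CCP_CMD_AUTH
 */
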
/* configure session */
static int
ccp_configure_session_cipher(struct ccp_session *sess,
			     const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;

	cipher_xform = &xform->cipher;

	/* set cipher direction */
	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
	else
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;

	/* set cipher key */
	sess->cipher.key_length = cipher_xform->key.length;
	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
		   cipher_xform->key.length);

	/* set iv parameters */
	sess->iv.offset = cipher_xform->iv.offset;
	sess->iv.length = cipher_xform->iv.length;

	/* only the unsupported-algorithm error path is implemented here */
	switch (cipher_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported cipher algo");
		return -1;
	}

	switch (sess->cipher.engine) {
	default:
		CCP_LOG_ERR("Invalid CCP Engine");
		return -ENOTSUP;
	}
	return 0;
}

static int
ccp_configure_session_auth(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_auth_xform *auth_xform = NULL;

	auth_xform = &xform->auth;

	sess->auth.digest_length = auth_xform->digest_length;
	if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	else
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	/* only the unsupported-algorithm error path is implemented here */
	switch (auth_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported hash algo");
		return -ENOTSUP;
	}
	return 0;
}

static int
ccp_configure_session_aead(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_aead_xform *aead_xform = NULL;

	aead_xform = &xform->aead;

	/* set AEAD key */
	sess->cipher.key_length = aead_xform->key.length;
	rte_memcpy(sess->cipher.key, aead_xform->key.data,
		   aead_xform->key.length);

	/* encrypt generates the tag, decrypt verifies it */
	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
		sess->auth.op = CCP_AUTH_OP_GENERATE;
	} else {
		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
		sess->auth.op = CCP_AUTH_OP_VERIFY;
	}
	sess->auth.aad_length = aead_xform->aad_length;
	sess->auth.digest_length = aead_xform->digest_length;

	/* set iv parameters */
	sess->iv.offset = aead_xform->iv.offset;
	sess->iv.length = aead_xform->iv.length;

	/* only the unsupported-algorithm error path is implemented here */
	switch (aead_xform->algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo");
		return -ENOTSUP;
	}
	return 0;
}

int
ccp_set_session_parameters(struct ccp_session *sess,
			   const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	int ret = 0;

	sess->cmd_id = ccp_get_cmd_id(xform);

	/* select which xforms apply for this command order */
	switch (sess->cmd_id) {
	case CCP_CMD_CIPHER:
		cipher_xform = xform;
		break;
	case CCP_CMD_AUTH:
		auth_xform = xform;
		break;
	case CCP_CMD_CIPHER_HASH:
		cipher_xform = xform;
		auth_xform = xform->next;
		break;
	case CCP_CMD_HASH_CIPHER:
		auth_xform = xform;
		cipher_xform = xform->next;
		break;
	case CCP_CMD_COMBINED:
		aead_xform = xform;
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
		return -1;
	}

	/* Default IV length = 0 */
	sess->iv.length = 0;
	if (cipher_xform) {
		ret = ccp_configure_session_cipher(sess, cipher_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
			return ret;
		}
	}
	if (auth_xform) {
		ret = ccp_configure_session_auth(sess, auth_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported auth parameters");
			return ret;
		}
	}
	if (aead_xform) {
		ret = ccp_configure_session_aead(sess, aead_xform);
		if (ret != 0) {
			CCP_LOG_ERR("Invalid/unsupported aead parameters");
			return ret;
		}
	}
	return ret;
}
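
/*
 * Usage sketch (illustrative only; not the PMD's actual session-configure
 * callback): a caller holding a ccp_session private area would do
 *
 *	ret = ccp_set_session_parameters(sess, xform);
 *	if (ret != 0)
 *		return ret;
 *
 * On success, sess->cmd_id and the cipher/auth/aead sub-structures drive
 * both the slot counting and the enqueue path below.
 */
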
/* calculate CCP descriptors requirement */
static int
ccp_cipher_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->cipher.algo) {
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
	}
	return count;
}

static int
ccp_auth_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
	}

	return count;
}

static int
ccp_aead_slot(struct ccp_session *session)
{
	int count = 0;

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
	}
	return count;
}

int
ccp_compute_slot_count(struct ccp_session *session)
{
	int count = 0;

	switch (session->cmd_id) {
	case CCP_CMD_CIPHER:
		count = ccp_cipher_slot(session);
		break;
	case CCP_CMD_AUTH:
		count = ccp_auth_slot(session);
		break;
	case CCP_CMD_CIPHER_HASH:
	case CCP_CMD_HASH_CIPHER:
		count = ccp_cipher_slot(session);
		count += ccp_auth_slot(session);
		break;
	case CCP_CMD_COMBINED:
		count = ccp_aead_slot(session);
		break;
	default:
		CCP_LOG_ERR("Unsupported cmd_id");
	}

	return count;
}
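
/*
 * Sketch of how the slot count is meant to be used (illustrative only;
 * the burst enqueue callback lives elsewhere in the PMD): the caller sums
 * ccp_compute_slot_count() over the ops it wants to submit and only calls
 * process_ops_to_enqueue() when the queue has room, e.g.
 *
 *	slots = 0;
 *	for (i = 0; i < nb_ops; i++)
 *		slots += ccp_compute_slot_count(sess_of(ops[i]));
 *	if (rte_atomic64_read(&cmd_q->free_slots) >= slots)
 *		enq = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots);
 *
 * sess_of() stands in for the session lookup and is not a real helper in
 * this driver.
 */
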
/* per-op descriptor builders; only the unsupported-algorithm error paths
 * exist here
 */
static inline int
ccp_crypto_cipher(struct rte_crypto_op *op,
		  struct ccp_queue *cmd_q __rte_unused,
		  struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->cipher.algo) {
	default:
		CCP_LOG_ERR("Unsupported cipher algo %d",
			    session->cipher.algo);
		return -ENOTSUP;
	}
	return result;
}

static inline int
ccp_crypto_auth(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->auth.algo) {
	default:
		CCP_LOG_ERR("Unsupported auth algo %d",
			    session->auth.algo);
		return -ENOTSUP;
	}

	return result;
}

static inline int
ccp_crypto_aead(struct rte_crypto_op *op,
		struct ccp_queue *cmd_q __rte_unused,
		struct ccp_batch_info *b_info __rte_unused)
{
	int result = 0;
	struct ccp_session *session;

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	switch (session->aead_algo) {
	default:
		CCP_LOG_ERR("Unsupported aead algo %d",
			    session->aead_algo);
		return -ENOTSUP;
	}
	return result;
}

int
process_ops_to_enqueue(const struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       struct ccp_queue *cmd_q,
		       uint16_t nb_ops,
		       int slots_req)
{
	int i, result = 0;
	struct ccp_batch_info *b_info;
	struct ccp_session *session;

	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
		CCP_LOG_ERR("batch info allocation failed");
		return 0;
	}
	/* populate batch info necessary for dequeue */
	b_info->op_idx = 0;
	b_info->lsb_buf_idx = 0;
	b_info->desccnt = 0;
	b_info->cmd_q = cmd_q;
	b_info->lsb_buf_phys =
		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);

	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);
	for (i = 0; i < nb_ops; i++) {
		session = (struct ccp_session *)get_session_private_data(
						 op[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_AUTH:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_CIPHER_HASH:
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_HASH_CIPHER:
			result = ccp_crypto_auth(op[i], cmd_q, b_info);
			if (result)
				break;
			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
			break;
		case CCP_CMD_COMBINED:
			result = ccp_crypto_aead(op[i], cmd_q, b_info);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
			result = -1;
		}
		if (unlikely(result < 0)) {
			/* give back the slots that were not consumed */
			rte_atomic64_add(&b_info->cmd_q->free_slots,
					 (slots_req - b_info->desccnt));
			break;
		}
		b_info->op[i] = op[i];
	}

	b_info->opcnt = i;
	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
					 Q_DESC_SIZE);

	rte_wmb();
	/* Write the new tail address back to the queue register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
		      b_info->tail_offset);
	/* Turn the queue back on using our cached control register */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
		      cmd_q->qcontrol | CMD_Q_RUN);

	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);

	return i;
}
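
/*
 * Note on the hand-off above: b_info records the queue head offset sampled
 * before the descriptors were built and the tail offset written to
 * CMD_Q_TAIL_LO_BASE. process_ops_to_dequeue() compares the hardware's
 * current head pointer against this [head, tail) window to decide whether
 * the batch has completed.
 */
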
static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
{
	struct ccp_session *session;
	uint8_t *digest_data, *addr;
	struct rte_mbuf *m_last;
	int offset, digest_offset;
	uint8_t digest_le[64];

	session = (struct ccp_session *)get_session_private_data(
					 op->sym->session,
					 ccp_cryptodev_driver_id);

	if (session->cmd_id == CCP_CMD_COMBINED) {
		digest_data = op->sym->aead.digest.data;
		digest_offset = op->sym->aead.data.offset +
					op->sym->aead.data.length;
	} else {
		digest_data = op->sym->auth.digest.data;
		digest_offset = op->sym->auth.data.offset +
					op->sym->auth.data.length;
	}
	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
			   m_last->data_len - session->auth.ctx_len);

	offset = session->auth.offset;

	if (session->auth.engine == CCP_ENGINE_SHA)
		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
			/* All other algorithms require byte
			 * swap done by host
			 */
			unsigned int i;

			offset = session->auth.ctx_len -
				session->auth.offset - 1;
			for (i = 0; i < session->auth.digest_length; i++)
				digest_le[i] = addr[offset - i];
			offset = 0;
			addr = digest_le;
		}

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
		if (memcmp(addr + offset, digest_data,
			   session->auth.digest_length) != 0)
			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

	} else {
		if (unlikely(digest_data == 0))
			digest_data = rte_pktmbuf_mtod_offset(
					op->sym->m_dst, uint8_t *,
					digest_offset);
		rte_memcpy(digest_data, addr + offset,
			   session->auth.digest_length);
	}
	/* Trim area used for digest from mbuf. */
	rte_pktmbuf_trim(op->sym->m_src,
			 session->auth.ctx_len);
}
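
/*
 * Note: the digest produced by the hardware is read from the last
 * session->auth.ctx_len bytes of the source mbuf, byte-reversed into
 * digest_le[] for SHA types other than SHA-1/224/256, then either compared
 * against the expected digest (verify) or copied out (generate), and the
 * ctx_len scratch area is trimmed off the mbuf.
 */
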
static int
ccp_prepare_ops(struct rte_crypto_op **op_d,
		struct ccp_batch_info *b_info,
		uint16_t nb_ops)
{
	int i, min_ops;
	struct ccp_session *session;

	min_ops = RTE_MIN(nb_ops, b_info->opcnt);

	for (i = 0; i < min_ops; i++) {
		op_d[i] = b_info->op[b_info->op_idx++];
		session = (struct ccp_session *)get_session_private_data(
						 op_d[i]->sym->session,
						 ccp_cryptodev_driver_id);
		switch (session->cmd_id) {
		case CCP_CMD_CIPHER:
			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case CCP_CMD_AUTH:
		case CCP_CMD_CIPHER_HASH:
		case CCP_CMD_HASH_CIPHER:
		case CCP_CMD_COMBINED:
			ccp_auth_dq_prepare(op_d[i]);
			break;
		default:
			CCP_LOG_ERR("Unsupported cmd_id");
		}
	}

	b_info->opcnt -= min_ops;
	return min_ops;
}

int
process_ops_to_dequeue(struct ccp_qp *qp,
		       struct rte_crypto_op **op,
		       uint16_t nb_ops)
{
	struct ccp_batch_info *b_info;
	uint32_t cur_head_offset;

	if (qp->b_info != NULL) {
		b_info = qp->b_info;
		if (unlikely(b_info->op_idx > 0))
			goto success;
	} else if (rte_ring_dequeue(qp->processed_pkts,
				    (void **)&b_info))
		return 0;
	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
				       CMD_Q_HEAD_LO_BASE);

	/* batch is still in flight while the HW head sits in [head, tail) */
	if (b_info->head_offset < b_info->tail_offset) {
		if ((cur_head_offset >= b_info->head_offset) &&
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	} else {
		if ((cur_head_offset >= b_info->head_offset) ||
		    (cur_head_offset < b_info->tail_offset)) {
			qp->b_info = b_info;
			return 0;
		}
	}

success:
	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
	b_info->desccnt = 0;
	if (b_info->opcnt > 0) {
		qp->b_info = b_info;
	} else {
		rte_mempool_put(qp->batch_mp, (void *)b_info);
		qp->b_info = NULL;
	}

	return nb_ops;
}
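
/*
 * Usage sketch for the dequeue side (illustrative only; the real burst
 * callback is registered elsewhere in the PMD):
 *
 *	static uint16_t
 *	example_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 *			      uint16_t nb_ops)
 *	{
 *		struct ccp_qp *qp = queue_pair;
 *
 *		return process_ops_to_dequeue(qp, ops, nb_ops);
 *	}
 *
 * A batch that is only partially drained stays parked in qp->b_info so the
 * remaining ops are handed back on the next call.
 */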