1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2019 Marvell International Ltd.
5 #ifndef _CPT_UCODE_ASYM_H_
6 #define _CPT_UCODE_ASYM_H_
8 #include <rte_common.h>
9 #include <rte_crypto_asym.h>
10 #include <rte_malloc.h>
12 #include "cpt_common.h"
13 #include "cpt_hw_types.h"
14 #include "cpt_mcode_defines.h"
/*
 * Normalize a big-endian multi-precision integer in place: advance *data
 * past any leading zero bytes and shrink *len to match, so the value
 * handed to the CPT microcode starts at its most significant non-zero byte.
 */
16 static __rte_always_inline void
17 cpt_modex_param_normalize(uint8_t **data, size_t *len)
21 /* Strip leading NUL bytes */
23 for (i = 0; i < *len; i++) {
/*
 * Populate the session's modular-exponentiation context from the xform.
 *
 * Both modulus and exponent are normalized (leading zeros stripped) and
 * then copied into a single rte_malloc'd buffer laid out as
 * [modulus | exponent]; ctx->exponent.data points inside that buffer.
 * The buffer is released in cpt_free_asym_session_parameters().
 * Rejects zero-length params and exponents longer than the modulus.
 */
32 static __rte_always_inline int
33 cpt_fill_modex_params(struct cpt_asym_sess_misc *sess,
34 struct rte_crypto_asym_xform *xform)
36 struct rte_crypto_modex_xform *ctx = &sess->mod_ctx;
37 size_t exp_len = xform->modex.exponent.length;
38 size_t mod_len = xform->modex.modulus.length;
39 uint8_t *exp = xform->modex.exponent.data;
40 uint8_t *mod = xform->modex.modulus.data;
42 cpt_modex_param_normalize(&mod, &mod_len);
43 cpt_modex_param_normalize(&exp, &exp_len);
/* Either value normalizing to zero means an unusable parameter set */
45 if (unlikely(exp_len == 0 || mod_len == 0))
48 if (unlikely(exp_len > mod_len)) {
49 CPT_LOG_DP_ERR("Exponent length greater than modulus length is not supported");
53 /* Allocate buffer to hold modexp params */
54 ctx->modulus.data = rte_malloc(NULL, mod_len + exp_len, 0);
55 if (ctx->modulus.data == NULL) {
56 CPT_LOG_DP_ERR("Could not allocate buffer for modex params")_
60 /* Set up modexp prime modulus and private exponent */
62 memcpy(ctx->modulus.data, mod, mod_len);
/* Exponent lives immediately after the modulus in the same allocation */
63 ctx->exponent.data = ctx->modulus.data + mod_len;
64 memcpy(ctx->exponent.data, exp, exp_len);
66 ctx->modulus.length = mod_len;
67 ctx->exponent.length = exp_len;
/*
 * Populate the session's RSA context from the xform.
 *
 * All key material is copied into one rte_malloc'd buffer laid out as
 * [n | e | q | dQ | p | dP | qInv]; each rsa->... data pointer aliases
 * into that buffer.  The allocation is rooted at rsa->n.data and is
 * released in cpt_free_asym_session_parameters().
 */
72 static __rte_always_inline int
73 cpt_fill_rsa_params(struct cpt_asym_sess_misc *sess,
74 struct rte_crypto_asym_xform *xform)
76 struct rte_crypto_rsa_priv_key_qt qt = xform->rsa.qt;
77 struct rte_crypto_rsa_xform *xfrm_rsa = &xform->rsa;
78 struct rte_crypto_rsa_xform *rsa = &sess->rsa_ctx;
79 size_t mod_len = xfrm_rsa->n.length;
80 size_t exp_len = xfrm_rsa->e.length;
84 /* Make sure key length used is not more than mod_len/2 */
/*
 * NOTE(review): the declaration/initialization of 'len' is outside the
 * lines visible here; presumably it defaults to mod_len/2 when
 * qt.p.length exceeds that bound — confirm against the full source.
 */
85 if (qt.p.data != NULL)
86 len = (((mod_len / 2) < qt.p.length) ? len : qt.p.length);
88 /* Total size required for RSA key params(n,e,(q,dQ,p,dP,qInv)) */
89 total_size = mod_len + exp_len + 5 * len;
91 /* Allocate buffer to hold all RSA keys */
92 rsa->n.data = rte_malloc(NULL, total_size, 0);
93 if (rsa->n.data == NULL) {
94 CPT_LOG_DP_ERR("Could not allocate buffer for RSA keys");
98 /* Set up RSA prime modulus and public key exponent */
99 memcpy(rsa->n.data, xfrm_rsa->n.data, mod_len);
100 rsa->e.data = rsa->n.data + mod_len;
101 memcpy(rsa->e.data, xfrm_rsa->e.data, exp_len);
103 /* Private key in quintuple format */
/* Quintuple components are packed back-to-back: q, dQ, p, dP, qInv */
105 rsa->qt.q.data = rsa->e.data + exp_len;
106 memcpy(rsa->qt.q.data, qt.q.data, qt.q.length);
107 rsa->qt.dQ.data = rsa->qt.q.data + qt.q.length;
108 memcpy(rsa->qt.dQ.data, qt.dQ.data, qt.dQ.length);
109 rsa->qt.p.data = rsa->qt.dQ.data + qt.dQ.length;
110 memcpy(rsa->qt.p.data, qt.p.data, qt.p.length);
111 rsa->qt.dP.data = rsa->qt.p.data + qt.p.length;
112 memcpy(rsa->qt.dP.data, qt.dP.data, qt.dP.length);
113 rsa->qt.qInv.data = rsa->qt.dP.data + qt.dP.length;
114 memcpy(rsa->qt.qInv.data, qt.qInv.data, qt.qInv.length);
116 rsa->qt.q.length = qt.q.length;
117 rsa->qt.dQ.length = qt.dQ.length;
118 rsa->qt.p.length = qt.p.length;
119 rsa->qt.dP.length = qt.dP.length;
120 rsa->qt.qInv.length = qt.qInv.length;
122 rsa->n.length = mod_len;
123 rsa->e.length = exp_len;
/*
 * Map the rte_crypto EC group id of the xform to the CPT-internal curve
 * id stored in the session EC context.  Only the NIST prime curves
 * P-192/224/256/384/521 are accepted; anything else is logged and
 * rejected in the default case.
 */
128 static __rte_always_inline int
129 cpt_fill_ec_params(struct cpt_asym_sess_misc *sess,
130 struct rte_crypto_asym_xform *xform)
132 struct cpt_asym_ec_ctx *ec = &sess->ec_ctx;
134 switch (xform->ec.curve_id) {
135 case RTE_CRYPTO_EC_GROUP_SECP192R1:
136 ec->curveid = CPT_EC_ID_P192;
138 case RTE_CRYPTO_EC_GROUP_SECP224R1:
139 ec->curveid = CPT_EC_ID_P224;
141 case RTE_CRYPTO_EC_GROUP_SECP256R1:
142 ec->curveid = CPT_EC_ID_P256;
144 case RTE_CRYPTO_EC_GROUP_SECP384R1:
145 ec->curveid = CPT_EC_ID_P384;
147 case RTE_CRYPTO_EC_GROUP_SECP521R1:
148 ec->curveid = CPT_EC_ID_P521;
151 /* Only NIST curves (FIPS 186-4) are supported */
152 CPT_LOG_DP_ERR("Unsupported curve");
/*
 * Top-level session setup: record the transform type in the session and
 * dispatch to the matching parameter-fill helper (RSA, MODEX or ECDSA).
 * Unknown transform types are logged and rejected in the default case.
 */
159 static __rte_always_inline int
160 cpt_fill_asym_session_parameters(struct cpt_asym_sess_misc *sess,
161 struct rte_crypto_asym_xform *xform)
165 sess->xfrm_type = xform->xform_type;
167 switch (xform->xform_type) {
168 case RTE_CRYPTO_ASYM_XFORM_RSA:
169 ret = cpt_fill_rsa_params(sess, xform);
171 case RTE_CRYPTO_ASYM_XFORM_MODEX:
172 ret = cpt_fill_modex_params(sess, xform);
174 case RTE_CRYPTO_ASYM_XFORM_ECDSA:
175 ret = cpt_fill_ec_params(sess, xform);
178 CPT_LOG_DP_ERR("Unsupported transform type");
/*
 * Release the per-session key buffers allocated by the fill helpers.
 * RSA and MODEX each made a single allocation (rooted at rsa->n.data and
 * mod->modulus.data respectively); ECDSA allocated nothing to free.
 */
184 static __rte_always_inline void
185 cpt_free_asym_session_parameters(struct cpt_asym_sess_misc *sess)
187 struct rte_crypto_modex_xform *mod;
188 struct rte_crypto_rsa_xform *rsa;
190 switch (sess->xfrm_type) {
191 case RTE_CRYPTO_ASYM_XFORM_RSA:
192 rsa = &sess->rsa_ctx;
194 rte_free(rsa->n.data);
196 case RTE_CRYPTO_ASYM_XFORM_MODEX:
197 mod = &sess->mod_ctx;
/* NOTE(review): rte_free(NULL) is a no-op, so this guard is redundant */
198 if (mod->modulus.data)
199 rte_free(mod->modulus.data);
201 case RTE_CRYPTO_ASYM_XFORM_ECDSA:
204 CPT_LOG_DP_ERR("Invalid transform type");
/*
 * Install the completion address in the request.  The virtual address is
 * rounded up to a 16-byte boundary and the DMA address is advanced by the
 * same delta so both refer to the same memory; the completion word is
 * pre-set to COMPLETION_CODE_INIT so completion can be detected by change.
 */
209 static __rte_always_inline void
210 cpt_fill_req_comp_addr(struct cpt_request_info *req, buf_ptr_t addr)
212 void *completion_addr = RTE_PTR_ALIGN(addr.vaddr, 16);
214 /* Pointer to cpt_res_s, updated by CPT */
215 req->completion_addr = (volatile uint64_t *)completion_addr;
/* Keep vaddr and dma_addr pointing at the same (aligned) location */
216 req->comp_baddr = addr.dma_addr +
217 RTE_PTR_DIFF(completion_addr, addr.vaddr);
218 *(req->completion_addr) = COMPLETION_CODE_INIT;
/*
 * Build a CPT modular-exponentiation request.
 *
 * Input data (dptr) is staged in the meta buffer immediately after the
 * cpt_request_info header, laid out as [modulus | exponent | base]
 * (modulus and exponent are contiguous in the session ctx, so one memcpy
 * of total_key_len covers both).  The result area of rlen bytes follows
 * the input, then the alternate completion word and the completion
 * address.  Fails the op if the base is longer than the modulus.
 */
221 static __rte_always_inline int
222 cpt_modex_prep(struct asym_op_params *modex_params,
223 struct rte_crypto_modex_xform *mod)
225 struct cpt_request_info *req = modex_params->req;
226 phys_addr_t mphys = modex_params->meta_buf;
227 uint32_t exp_len = mod->exponent.length;
228 uint32_t mod_len = mod->modulus.length;
229 struct rte_crypto_mod_op_param mod_op;
230 struct rte_crypto_op **op;
231 vq_cmd_word0_t vq_cmd_w0;
232 uint64_t total_key_len;
233 opcode_info_t opcode;
239 /* Extracting modex op form params->req->op[1]->asym->modex */
240 op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
241 mod_op = ((struct rte_crypto_op *)*op)->asym->modex;
243 base_len = mod_op.base.length;
244 if (unlikely(base_len > mod_len)) {
245 CPT_LOG_DP_ERR("Base length greater than modulus length is not supported");
246 (*op)->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
250 total_key_len = mod_len + exp_len;
/* Input area begins right after the request header in the meta buffer */
253 dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
/* Modulus and exponent are back-to-back in the session allocation */
254 memcpy(dptr, mod->modulus.data, total_key_len);
255 dptr += total_key_len;
256 memcpy(dptr, mod_op.base.data, base_len);
258 dlen = total_key_len + base_len;
264 opcode.s.major = CPT_MAJOR_OP_MODEX;
265 opcode.s.minor = CPT_MINOR_OP_MODEX;
266 vq_cmd_w0.s.opcode = opcode.flags;
269 vq_cmd_w0.s.param1 = mod_len;
270 vq_cmd_w0.s.param2 = exp_len;
271 vq_cmd_w0.s.dlen = dlen;
273 /* Filling cpt_request_info structure */
274 req->ist.ei0 = vq_cmd_w0.u64;
275 req->ist.ei1 = mphys;
/* Result (rptr) follows the dlen bytes of input in the meta buffer */
276 req->ist.ei2 = mphys + dlen;
278 /* Result pointer to store result data */
281 /* alternate_caddr to write completion status of the microcode */
282 req->alternate_caddr = (uint64_t *)(dptr + rlen);
/* Seed with the inverse of the init code so an update is observable */
283 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
285 /* Preparing completion addr, +1 for completion code */
286 caddr.vaddr = dptr + rlen + 1;
287 caddr.dma_addr = mphys + dlen + rlen + 1;
289 cpt_fill_req_comp_addr(req, caddr);
/*
 * Build a CPT RSA request using the public key (n, e) — used for the
 * encrypt and verify paths.
 *
 * Input data is staged after the request header as [n | e | input],
 * where 'input' is the caller-selected crypto_param (message or sign).
 * With RTE_CRYPTO_RSA_PADDING_NONE the plain mod-exp minor opcode is
 * used; otherwise the PKCS#1 encrypt/decrypt minor opcodes are chosen
 * by op_type, with the block type encoded into param2.
 */
293 static __rte_always_inline void
294 cpt_rsa_prep(struct asym_op_params *rsa_params,
295 struct rte_crypto_rsa_xform *rsa,
296 rte_crypto_param *crypto_param)
298 struct cpt_request_info *req = rsa_params->req;
299 phys_addr_t mphys = rsa_params->meta_buf;
300 struct rte_crypto_rsa_op_param rsa_op;
301 uint32_t mod_len = rsa->n.length;
302 uint32_t exp_len = rsa->e.length;
303 struct rte_crypto_op **op;
304 vq_cmd_word0_t vq_cmd_w0;
305 uint64_t total_key_len;
306 opcode_info_t opcode;
312 /* Extracting rsa op form params->req->op[1]->asym->rsa */
313 op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
314 rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
315 total_key_len = mod_len + exp_len;
/* Input area begins right after the request header in the meta buffer */
318 dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
/* n and e are back-to-back in the session allocation */
319 memcpy(dptr, rsa->n.data, total_key_len);
320 dptr += total_key_len;
322 in_size = crypto_param->length;
323 memcpy(dptr, crypto_param->data, in_size);
326 dlen = total_key_len + in_size;
331 if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
332 /* Use mod_exp operation for no_padding type */
333 opcode.s.minor = CPT_MINOR_OP_MODEX;
334 vq_cmd_w0.s.param2 = exp_len;
336 if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
337 opcode.s.minor = CPT_MINOR_OP_PKCS_ENC;
338 /* Public key encrypt, use BT2*/
/* param2 packs block type in bit 0 and exponent length above it */
339 vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2 |
340 ((uint16_t)(exp_len) << 1);
341 } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
342 opcode.s.minor = CPT_MINOR_OP_PKCS_DEC;
343 /* Public key decrypt, use BT1 */
344 vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
345 /* + 2 for decrypted len */
351 opcode.s.major = CPT_MAJOR_OP_MODEX;
352 vq_cmd_w0.s.opcode = opcode.flags;
355 vq_cmd_w0.s.param1 = mod_len;
356 vq_cmd_w0.s.dlen = dlen;
358 /* Filling cpt_request_info structure */
359 req->ist.ei0 = vq_cmd_w0.u64;
360 req->ist.ei1 = mphys;
/* Result (rptr) follows the dlen bytes of input in the meta buffer */
361 req->ist.ei2 = mphys + dlen;
363 /* Result pointer to store result data */
366 /* alternate_caddr to write completion status of the microcode */
367 req->alternate_caddr = (uint64_t *)(dptr + rlen);
/* Seed with the inverse of the init code so an update is observable */
368 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
370 /* Preparing completion addr, +1 for completion code */
371 caddr.vaddr = dptr + rlen + 1;
372 caddr.dma_addr = mphys + dlen + rlen + 1;
374 cpt_fill_req_comp_addr(req, caddr);
/*
 * Build a CPT RSA request using the private key in CRT quintuple form
 * (q, dQ, p, dP, qInv) — used for the sign and decrypt paths.
 *
 * The five CRT components are contiguous in the session allocation
 * starting at qt.q.data, so a single memcpy of total_key_len stages all
 * of them; the caller-selected input follows.  With
 * RTE_CRYPTO_RSA_PADDING_NONE the CRT mod-exp minor opcode is used;
 * otherwise the PKCS#1 CRT encrypt/decrypt minor opcodes are chosen by
 * op_type, with the block type in param2.
 */
377 static __rte_always_inline void
378 cpt_rsa_crt_prep(struct asym_op_params *rsa_params,
379 struct rte_crypto_rsa_xform *rsa,
380 rte_crypto_param *crypto_param)
382 struct cpt_request_info *req = rsa_params->req;
383 phys_addr_t mphys = rsa_params->meta_buf;
384 uint32_t qInv_len = rsa->qt.qInv.length;
385 struct rte_crypto_rsa_op_param rsa_op;
386 uint32_t dP_len = rsa->qt.dP.length;
387 uint32_t dQ_len = rsa->qt.dQ.length;
388 uint32_t p_len = rsa->qt.p.length;
389 uint32_t q_len = rsa->qt.q.length;
390 uint32_t mod_len = rsa->n.length;
391 struct rte_crypto_op **op;
392 vq_cmd_word0_t vq_cmd_w0;
393 uint64_t total_key_len;
394 opcode_info_t opcode;
400 /* Extracting rsa op form params->req->op[1]->asym->rsa */
401 op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
402 rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
403 total_key_len = p_len + q_len + dP_len + dQ_len + qInv_len;
/* Input area begins right after the request header in the meta buffer */
406 dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
/* q, dQ, p, dP, qInv are packed back-to-back from qt.q.data */
407 memcpy(dptr, rsa->qt.q.data, total_key_len);
408 dptr += total_key_len;
410 in_size = crypto_param->length;
411 memcpy(dptr, crypto_param->data, in_size);
414 dlen = total_key_len + in_size;
419 if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
420 /*Use mod_exp operation for no_padding type */
421 opcode.s.minor = CPT_MINOR_OP_MODEX_CRT;
423 if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
424 opcode.s.minor = CPT_MINOR_OP_PKCS_ENC_CRT;
425 /* Private encrypt, use BT1 */
426 vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
427 } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
428 opcode.s.minor = CPT_MINOR_OP_PKCS_DEC_CRT;
429 /* Private decrypt, use BT2 */
430 vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2;
431 /* + 2 for decrypted len */
437 opcode.s.major = CPT_MAJOR_OP_MODEX;
438 vq_cmd_w0.s.opcode = opcode.flags;
441 vq_cmd_w0.s.param1 = mod_len;
442 vq_cmd_w0.s.dlen = dlen;
444 /* Filling cpt_request_info structure */
445 req->ist.ei0 = vq_cmd_w0.u64;
446 req->ist.ei1 = mphys;
/* Result (rptr) follows the dlen bytes of input in the meta buffer */
447 req->ist.ei2 = mphys + dlen;
449 /* Result pointer to store result data */
452 /* alternate_caddr to write completion status of the microcode */
453 req->alternate_caddr = (uint64_t *)(dptr + rlen);
/* Seed with the inverse of the init code so an update is observable */
454 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
456 /* Preparing completion addr, +1 for completion code */
457 caddr.vaddr = dptr + rlen + 1;
458 caddr.dma_addr = mphys + dlen + rlen + 1;
460 cpt_fill_req_comp_addr(req, caddr);
/*
 * Dispatch an RSA crypto op to the proper request builder:
 * VERIFY/ENCRYPT use the public-key path (cpt_rsa_prep) on the
 * signature/message respectively; SIGN/DECRYPT use the private-key CRT
 * path (cpt_rsa_crt_prep) on the message/cipher.  Unknown op types mark
 * the op with INVALID_ARGS in the default case.
 */
463 static __rte_always_inline int __hot
464 cpt_enqueue_rsa_op(struct rte_crypto_op *op,
465 struct asym_op_params *params,
466 struct cpt_asym_sess_misc *sess)
468 struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;
470 switch (rsa->op_type) {
471 case RTE_CRYPTO_ASYM_OP_VERIFY:
472 cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->sign);
474 case RTE_CRYPTO_ASYM_OP_ENCRYPT:
475 cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->message);
477 case RTE_CRYPTO_ASYM_OP_SIGN:
478 cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->message);
480 case RTE_CRYPTO_ASYM_OP_DECRYPT:
481 cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->cipher);
484 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
/*
 * Per-curve EC domain parameters, indexed by the CPT_EC_ID_* curve id.
 * Byte arrays are big-endian; values match the NIST P-192/224/256/384/521
 * prime moduli and group orders (FIPS 186-4 / SEC 2).
 */
490 static const struct cpt_ec_group ec_grp[CPT_EC_ID_PMAX] = {
/* NIST P-192 prime */
494 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
495 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
496 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
/* NIST P-192 order */
502 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
503 0xFF, 0xFF, 0xFF, 0xFF, 0x99, 0xDE, 0xF8, 0x36,
504 0x14, 0x6B, 0xC9, 0xB1, 0xB4, 0xD2, 0x28, 0x31
/* NIST P-224 prime */
512 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
513 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
514 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x01
/* NIST P-224 order (note: upper-case 0X hex, inconsistent with the rest) */
521 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
522 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0X16, 0XA2,
523 0XE0, 0XB8, 0XF0, 0X3E, 0X13, 0XDD, 0X29, 0X45,
524 0X5C, 0X5C, 0X2A, 0X3D
/* NIST P-256 prime */
532 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
533 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
534 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
535 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
/* NIST P-256 order */
541 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
542 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
543 0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84,
544 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x51
/* NIST P-384 prime */
552 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
553 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
554 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
555 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
556 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF
/* NIST P-384 order */
563 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
564 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
565 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
566 0xC7, 0x63, 0x4D, 0x81, 0xF4, 0x37, 0x2D, 0xDF,
567 0x58, 0x1A, 0x0D, 0xB2, 0x48, 0xB0, 0xA7, 0x7A,
568 0xEC, 0xEC, 0x19, 0x6A, 0xCC, 0xC5, 0x29, 0x73
/* NIST P-521 prime (66 bytes) */
576 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
577 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
578 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
579 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
580 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
581 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
582 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
583 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
/* NIST P-521 order (66 bytes) */
590 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
591 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
592 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
593 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
594 0xFF, 0xFA, 0x51, 0x86, 0x87, 0x83, 0xBF, 0x2F,
595 0x96, 0x6B, 0x7F, 0xCC, 0x01, 0x48, 0xF7, 0x09,
596 0xA5, 0xD0, 0x3B, 0xB5, 0xC9, 0xB8, 0x89, 0x9C,
597 0x47, 0xAE, 0xBB, 0x6F, 0xB7, 0x1E, 0x91, 0x38,
/*
 * Build a CPT ECDSA-sign request.
 *
 * The meta buffer after the request header is laid out as
 * [fpm table iova | scalar k (8B aligned) | prime | order | private key
 *  | message (8B aligned)], with order and private key right-justified
 * within a prime-length field (o_offset / pk_offset).  The whole dlen
 * region is zeroed first so short fields are left-padded with zeros.
 * The result area holds the (r, s) signature pair.
 */
605 static __rte_always_inline void
606 cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
607 struct asym_op_params *ecdsa_params,
608 uint64_t fpm_table_iova,
611 struct cpt_request_info *req = ecdsa_params->req;
612 uint16_t message_len = ecdsa->message.length;
613 phys_addr_t mphys = ecdsa_params->meta_buf;
614 uint16_t pkey_len = ecdsa->pkey.length;
615 uint16_t p_align, k_align, m_align;
616 uint16_t k_len = ecdsa->k.length;
617 uint16_t order_len, prime_len;
618 uint16_t o_offset, pk_offset;
619 vq_cmd_word0_t vq_cmd_w0;
620 opcode_info_t opcode;
625 prime_len = ec_grp[curveid].prime.length;
626 order_len = ec_grp[curveid].order.length;
628 /* Truncate input length to curve prime length */
629 if (message_len > prime_len)
630 message_len = prime_len;
631 m_align = ROUNDUP8(message_len);
633 p_align = ROUNDUP8(prime_len);
634 k_align = ROUNDUP8(k_len);
636 /* Set write offset for order and private key */
637 o_offset = prime_len - order_len;
638 pk_offset = prime_len - pkey_len;
641 dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
644 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(scalar len, input len),
645 * ROUNDUP8(priv key len, prime len, order len)).
646 * Please note, private key, order cannot exceed prime
647 * length i.e 3 * p_align.
649 dlen = sizeof(fpm_table_iova) + k_align + m_align + p_align * 3;
/* Zero the whole input area so short fields are implicitly zero-padded */
651 memset(dptr, 0, dlen);
653 *(uint64_t *)dptr = fpm_table_iova;
654 dptr += sizeof(fpm_table_iova);
656 memcpy(dptr, ecdsa->k.data, k_len);
659 memcpy(dptr, ec_grp[curveid].prime.data, prime_len);
/* Order and private key are right-justified in prime-length fields */
662 memcpy(dptr + o_offset, ec_grp[curveid].order.data, order_len);
665 memcpy(dptr + pk_offset, ecdsa->pkey.data, pkey_len);
668 memcpy(dptr, ecdsa->message.data, message_len);
671 /* 2 * prime length (for sign r and s ) */
675 opcode.s.major = CPT_MAJOR_OP_ECDSA;
676 opcode.s.minor = CPT_MINOR_OP_ECDSA_SIGN;
677 vq_cmd_w0.s.opcode = opcode.flags;
/* param1 packs curve id (low byte) and truncated message length */
680 vq_cmd_w0.s.param1 = curveid | (message_len << 8);
681 vq_cmd_w0.s.param2 = k_len;
682 vq_cmd_w0.s.dlen = dlen;
684 /* Filling cpt_request_info structure */
685 req->ist.ei0 = vq_cmd_w0.u64;
686 req->ist.ei1 = mphys;
687 req->ist.ei2 = mphys + dlen;
689 /* Result pointer to store result data */
692 /* alternate_caddr to write completion status of the microcode */
693 req->alternate_caddr = (uint64_t *)(dptr + rlen);
694 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
696 /* Preparing completion addr, +1 for completion code */
697 caddr.vaddr = dptr + rlen + 1;
698 caddr.dma_addr = mphys + dlen + rlen + 1;
700 cpt_fill_req_comp_addr(req, caddr);
/*
 * Build a CPT ECDSA-verify request.
 *
 * The meta buffer after the request header is laid out as
 * [fpm table iova | r | s | message (8B aligned) | order | prime
 *  | Qx | Qy], where r, s, order and the public key coordinates are each
 * right-justified within a prime-length field (six p_align fields total).
 * The whole dlen region is zeroed first so short fields are left-padded
 * with zeros.  Verify produces no result data, so the alternate
 * completion word and completion address sit directly at dptr.
 */
703 static __rte_always_inline void
704 cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
705 struct asym_op_params *ecdsa_params,
706 uint64_t fpm_table_iova,
709 struct cpt_request_info *req = ecdsa_params->req;
710 uint32_t message_len = ecdsa->message.length;
711 phys_addr_t mphys = ecdsa_params->meta_buf;
712 uint16_t o_offset, r_offset, s_offset;
713 uint16_t qx_len = ecdsa->q.x.length;
714 uint16_t qy_len = ecdsa->q.y.length;
715 uint16_t r_len = ecdsa->r.length;
716 uint16_t s_len = ecdsa->s.length;
717 uint16_t order_len, prime_len;
718 uint16_t qx_offset, qy_offset;
719 uint16_t p_align, m_align;
720 vq_cmd_word0_t vq_cmd_w0;
721 opcode_info_t opcode;
726 prime_len = ec_grp[curveid].prime.length;
727 order_len = ec_grp[curveid].order.length;
729 /* Truncate input length to curve prime length */
730 if (message_len > prime_len)
731 message_len = prime_len;
733 m_align = ROUNDUP8(message_len);
734 p_align = ROUNDUP8(prime_len);
736 /* Set write offset for sign, order and public key coordinates */
737 o_offset = prime_len - order_len;
738 qx_offset = prime_len - qx_len;
739 qy_offset = prime_len - qy_len;
740 r_offset = prime_len - r_len;
741 s_offset = prime_len - s_len;
744 dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
747 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
748 * ROUNDUP8(sign len(r and s), public key len(x and y coordinates),
749 * prime len, order len)).
750 * Please note sign, public key and order can not excede prime length
753 dlen = sizeof(fpm_table_iova) + m_align + (6 * p_align);
/* Zero the whole input area so short fields are implicitly zero-padded */
755 memset(dptr, 0, dlen);
757 *(uint64_t *)dptr = fpm_table_iova;
758 dptr += sizeof(fpm_table_iova);
/* r and s are right-justified in prime-length fields */
760 memcpy(dptr + r_offset, ecdsa->r.data, r_len);
763 memcpy(dptr + s_offset, ecdsa->s.data, s_len);
766 memcpy(dptr, ecdsa->message.data, message_len);
769 memcpy(dptr + o_offset, ec_grp[curveid].order.data, order_len);
772 memcpy(dptr, ec_grp[curveid].prime.data, prime_len);
775 memcpy(dptr + qx_offset, ecdsa->q.x.data, qx_len);
778 memcpy(dptr + qy_offset, ecdsa->q.y.data, qy_len);
782 opcode.s.major = CPT_MAJOR_OP_ECDSA;
783 opcode.s.minor = CPT_MINOR_OP_ECDSA_VERIFY;
784 vq_cmd_w0.s.opcode = opcode.flags;
/* param1 packs curve id (low byte) and truncated message length */
787 vq_cmd_w0.s.param1 = curveid | (message_len << 8);
788 vq_cmd_w0.s.param2 = 0;
789 vq_cmd_w0.s.dlen = dlen;
791 /* Filling cpt_request_info structure */
792 req->ist.ei0 = vq_cmd_w0.u64;
793 req->ist.ei1 = mphys;
794 req->ist.ei2 = mphys + dlen;
796 /* Result pointer to store result data */
799 /* alternate_caddr to write completion status of the microcode */
/* No result data for verify: rlen is effectively zero here */
800 req->alternate_caddr = (uint64_t *)dptr;
801 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
803 /* Preparing completion addr, +1 for completion code */
804 caddr.vaddr = dptr + 1;
805 caddr.dma_addr = mphys + dlen + 1;
807 cpt_fill_req_comp_addr(req, caddr);
/*
 * Dispatch an ECDSA crypto op to the sign or verify request builder,
 * passing the FPM table IOVA for the session's curve.  Op types other
 * than SIGN/VERIFY mark the op with INVALID_ARGS.
 */
810 static __rte_always_inline int __hot
811 cpt_enqueue_ecdsa_op(struct rte_crypto_op *op,
812 struct asym_op_params *params,
813 struct cpt_asym_sess_misc *sess,
816 struct rte_crypto_ecdsa_op_param *ecdsa = &op->asym->ecdsa;
817 uint8_t curveid = sess->ec_ctx.curveid;
819 if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_SIGN)
820 cpt_ecdsa_sign_prep(ecdsa, params, fpm_iova[curveid], curveid);
821 else if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
822 cpt_ecdsa_verify_prep(ecdsa, params, fpm_iova[curveid],
825 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
831 #endif /* _CPT_UCODE_ASYM_H_ */