/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */

#ifndef _CPT_UCODE_ASYM_H_
#define _CPT_UCODE_ASYM_H_

#include <rte_common.h>
#include <rte_crypto_asym.h>
#include <rte_malloc.h>

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"

static __rte_always_inline void
cpt_modex_param_normalize(uint8_t **data, size_t *len)
{
        size_t i;

        /* Strip leading NUL bytes */
        for (i = 0; i < *len; i++) {
                if ((*data)[i] != 0)
                        break;
        }

        *data += i;
        *len -= i;
}
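
/*
 * The stripping above matters because operand lengths are passed to the
 * microcode in the command word (param1/param2 below): operands are
 * big-endian byte strings, so leading zero bytes would only inflate dlen
 * without changing the value.
 */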

static __rte_always_inline int
cpt_fill_modex_params(struct cpt_asym_sess_misc *sess,
                      struct rte_crypto_asym_xform *xform)
{
        struct rte_crypto_modex_xform *ctx = &sess->mod_ctx;
        size_t exp_len = xform->modex.exponent.length;
        size_t mod_len = xform->modex.modulus.length;
        uint8_t *exp = xform->modex.exponent.data;
        uint8_t *mod = xform->modex.modulus.data;

        cpt_modex_param_normalize(&mod, &mod_len);
        cpt_modex_param_normalize(&exp, &exp_len);

        if (unlikely(exp_len == 0 || mod_len == 0))
                return -EINVAL;

        if (unlikely(exp_len > mod_len)) {
                CPT_LOG_DP_ERR("Exponent length greater than modulus length is not supported");
                return -ENOTSUP;
        }

        /* Allocate a single buffer to hold both modexp params */
        ctx->modulus.data = rte_malloc(NULL, mod_len + exp_len, 0);
        if (ctx->modulus.data == NULL) {
                CPT_LOG_DP_ERR("Could not allocate buffer for modex params");
                return -ENOMEM;
        }

        /* Set up modexp modulus and exponent, packed back to back */
        memcpy(ctx->modulus.data, mod, mod_len);
        ctx->exponent.data = ctx->modulus.data + mod_len;
        memcpy(ctx->exponent.data, exp, exp_len);

        ctx->modulus.length = mod_len;
        ctx->exponent.length = exp_len;

        return 0;
}
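
/*
 * Resulting session layout (one rte_malloc() region, freed via
 * ctx->modulus.data in cpt_free_asym_session_parameters()):
 *
 *   ctx->modulus.data -> | modulus (mod_len) | exponent (exp_len) |
 *
 * cpt_modex_prep() relies on this contiguity to copy both params with a
 * single memcpy.
 */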

static __rte_always_inline int
cpt_fill_rsa_params(struct cpt_asym_sess_misc *sess,
                    struct rte_crypto_asym_xform *xform)
{
        struct rte_crypto_rsa_priv_key_qt qt = xform->rsa.qt;
        struct rte_crypto_rsa_xform *xfrm_rsa = &xform->rsa;
        struct rte_crypto_rsa_xform *rsa = &sess->rsa_ctx;
        size_t mod_len = xfrm_rsa->n.length;
        size_t exp_len = xfrm_rsa->e.length;
        uint64_t total_size;
        size_t len = 0;

        /* Make sure the key length used is not more than mod_len / 2 */
        if (qt.p.data != NULL)
                len = (((mod_len / 2) < qt.p.length) ? len : qt.p.length);

        /* Total size required for RSA key params (n, e, (q, dQ, p, dP, qInv)) */
        total_size = mod_len + exp_len + 5 * len;

        /* Allocate buffer to hold all RSA keys */
        rsa->n.data = rte_malloc(NULL, total_size, 0);
        if (rsa->n.data == NULL) {
                CPT_LOG_DP_ERR("Could not allocate buffer for RSA keys");
                return -ENOMEM;
        }

        /* Set up RSA modulus (n) and public exponent (e) */
        memcpy(rsa->n.data, xfrm_rsa->n.data, mod_len);
        rsa->e.data = rsa->n.data + mod_len;
        memcpy(rsa->e.data, xfrm_rsa->e.data, exp_len);

        /* Private key in quintuple format */
        if (len != 0) {
                rsa->qt.q.data = rsa->e.data + exp_len;
                memcpy(rsa->qt.q.data, qt.q.data, qt.q.length);
                rsa->qt.dQ.data = rsa->qt.q.data + qt.q.length;
                memcpy(rsa->qt.dQ.data, qt.dQ.data, qt.dQ.length);
                rsa->qt.p.data = rsa->qt.dQ.data + qt.dQ.length;
                memcpy(rsa->qt.p.data, qt.p.data, qt.p.length);
                rsa->qt.dP.data = rsa->qt.p.data + qt.p.length;
                memcpy(rsa->qt.dP.data, qt.dP.data, qt.dP.length);
                rsa->qt.qInv.data = rsa->qt.dP.data + qt.dP.length;
                memcpy(rsa->qt.qInv.data, qt.qInv.data, qt.qInv.length);

                rsa->qt.q.length = qt.q.length;
                rsa->qt.dQ.length = qt.dQ.length;
                rsa->qt.p.length = qt.p.length;
                rsa->qt.dP.length = qt.dP.length;
                rsa->qt.qInv.length = qt.qInv.length;
        }
        rsa->n.length = mod_len;
        rsa->e.length = exp_len;

        return 0;
}
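
/*
 * Quintuple layout packed above, in the order the CRT path consumes it
 * (cpt_rsa_crt_prep() copies total_key_len bytes starting at
 * rsa->qt.q.data, relying on this contiguity):
 *
 *   rsa->n.data -> | n | e | q | dQ | p | dP | qInv |
 */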

static __rte_always_inline int
cpt_fill_ec_params(struct cpt_asym_sess_misc *sess,
                   struct rte_crypto_asym_xform *xform)
{
        struct cpt_asym_ec_ctx *ec = &sess->ec_ctx;

        switch (xform->ec.curve_id) {
        case RTE_CRYPTO_EC_GROUP_SECP192R1:
                ec->curveid = CPT_EC_ID_P192;
                break;
        case RTE_CRYPTO_EC_GROUP_SECP224R1:
                ec->curveid = CPT_EC_ID_P224;
                break;
        case RTE_CRYPTO_EC_GROUP_SECP256R1:
                ec->curveid = CPT_EC_ID_P256;
                break;
        case RTE_CRYPTO_EC_GROUP_SECP384R1:
                ec->curveid = CPT_EC_ID_P384;
                break;
        case RTE_CRYPTO_EC_GROUP_SECP521R1:
                ec->curveid = CPT_EC_ID_P521;
                break;
        default:
                /* Only NIST curves (FIPS 186-4) are supported */
                CPT_LOG_DP_ERR("Unsupported curve");
                return -EINVAL;
        }

        return 0;
}

static __rte_always_inline int
cpt_fill_asym_session_parameters(struct cpt_asym_sess_misc *sess,
                                 struct rte_crypto_asym_xform *xform)
{
        int ret;

        sess->xfrm_type = xform->xform_type;

        switch (xform->xform_type) {
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                ret = cpt_fill_rsa_params(sess, xform);
                break;
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                ret = cpt_fill_modex_params(sess, xform);
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECDSA:
                /* Fall through */
        case RTE_CRYPTO_ASYM_XFORM_ECPM:
                ret = cpt_fill_ec_params(sess, xform);
                break;
        default:
                CPT_LOG_DP_ERR("Unsupported transform type");
                return -ENOTSUP;
        }

        return ret;
}
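
/*
 * Note that an EC session only records the curve id; the per-operation
 * parameters (scalars, points, messages) travel in each crypto op, which
 * is why the EC cases in cpt_free_asym_session_parameters() below have
 * nothing to release.
 */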

static __rte_always_inline void
cpt_free_asym_session_parameters(struct cpt_asym_sess_misc *sess)
{
        struct rte_crypto_modex_xform *mod;
        struct rte_crypto_rsa_xform *rsa;

        switch (sess->xfrm_type) {
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                rsa = &sess->rsa_ctx;
                /* rte_free() is a no-op on NULL, no check needed */
                rte_free(rsa->n.data);
                break;
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                mod = &sess->mod_ctx;
                rte_free(mod->modulus.data);
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECDSA:
                /* Fall through */
        case RTE_CRYPTO_ASYM_XFORM_ECPM:
                break;
        default:
                CPT_LOG_DP_ERR("Invalid transform type");
                break;
        }
}

static __rte_always_inline void
cpt_fill_req_comp_addr(struct cpt_request_info *req, buf_ptr_t addr)
{
        void *completion_addr = RTE_PTR_ALIGN(addr.vaddr, 16);

        /* Pointer to cpt_res_s, updated by CPT */
        req->completion_addr = (volatile uint64_t *)completion_addr;
        req->comp_baddr = addr.dma_addr +
                          RTE_PTR_DIFF(completion_addr, addr.vaddr);
        *(req->completion_addr) = COMPLETION_CODE_INIT;
}
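
/*
 * Each request arms two completion words: completion_addr (set up above,
 * seeded with COMPLETION_CODE_INIT) is updated by the CPT hardware, while
 * alternate_caddr (seeded with the bitwise complement in the prep routines
 * below) receives the microcode's own status, so software can tell when
 * each has been written.
 */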

static __rte_always_inline int
cpt_modex_prep(struct asym_op_params *modex_params,
               struct rte_crypto_modex_xform *mod)
{
        struct cpt_request_info *req = modex_params->req;
        phys_addr_t mphys = modex_params->meta_buf;
        uint32_t exp_len = mod->exponent.length;
        uint32_t mod_len = mod->modulus.length;
        struct rte_crypto_mod_op_param mod_op;
        struct rte_crypto_op **op;
        vq_cmd_word0_t vq_cmd_w0;
        uint64_t total_key_len;
        opcode_info_t opcode;
        uint32_t dlen, rlen;
        uint32_t base_len;
        buf_ptr_t caddr;
        uint8_t *dptr;

        /* Extract the modex op from params->req->op[1]->asym->modex */
        op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
        mod_op = ((struct rte_crypto_op *)*op)->asym->modex;

        base_len = mod_op.base.length;
        if (unlikely(base_len > mod_len)) {
                CPT_LOG_DP_ERR("Base length greater than modulus length is not supported");
                (*op)->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -ENOTSUP;
        }

        total_key_len = mod_len + exp_len;

        /* Input buffer */
        dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
        memcpy(dptr, mod->modulus.data, total_key_len);
        dptr += total_key_len;
        memcpy(dptr, mod_op.base.data, base_len);
        dptr += base_len;
        dlen = total_key_len + base_len;

        /* Result buffer */
        rlen = mod_len;

        /* Setup opcodes */
        opcode.s.major = CPT_MAJOR_OP_MODEX;
        opcode.s.minor = CPT_MINOR_OP_MODEX;
        vq_cmd_w0.s.opcode = opcode.flags;

        /* GP op header */
        vq_cmd_w0.s.param1 = mod_len;
        vq_cmd_w0.s.param2 = exp_len;
        vq_cmd_w0.s.dlen = dlen;

        /* Filling cpt_request_info structure */
        req->ist.ei0 = vq_cmd_w0.u64;
        req->ist.ei1 = mphys;
        req->ist.ei2 = mphys + dlen;

        /* Result pointer to store result data */
        req->rptr = dptr;

        /* alternate_caddr to write completion status of the microcode */
        req->alternate_caddr = (uint64_t *)(dptr + rlen);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);

        /* Preparing completion addr, +1 for completion code */
        caddr.vaddr = dptr + rlen + 1;
        caddr.dma_addr = mphys + dlen + rlen + 1;

        cpt_fill_req_comp_addr(req, caddr);
        return 0;
}
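
/*
 * Meta buffer laid out above: dlen input bytes consumed by the microcode,
 * followed by the output and completion area:
 *
 *   DPTR -> | modulus | exponent | base |
 *   RPTR -> | result (mod_len) | microcode status | completion (aligned) |
 */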

static __rte_always_inline void
cpt_rsa_prep(struct asym_op_params *rsa_params,
             struct rte_crypto_rsa_xform *rsa,
             rte_crypto_param *crypto_param)
{
        struct cpt_request_info *req = rsa_params->req;
        phys_addr_t mphys = rsa_params->meta_buf;
        struct rte_crypto_rsa_op_param rsa_op;
        uint32_t mod_len = rsa->n.length;
        uint32_t exp_len = rsa->e.length;
        struct rte_crypto_op **op;
        vq_cmd_word0_t vq_cmd_w0;
        uint64_t total_key_len;
        opcode_info_t opcode;
        uint32_t dlen, rlen;
        uint32_t in_size;
        buf_ptr_t caddr;
        uint8_t *dptr;

        /* Extract the rsa op from params->req->op[1]->asym->rsa */
        op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
        rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
        total_key_len = mod_len + exp_len;

        /* Input buffer */
        dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
        memcpy(dptr, rsa->n.data, total_key_len);
        dptr += total_key_len;

        in_size = crypto_param->length;
        memcpy(dptr, crypto_param->data, in_size);
        dptr += in_size;

        dlen = total_key_len + in_size;

        /* Result buffer */
        rlen = mod_len;

        if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
                /* Use mod_exp operation for no_padding type */
                opcode.s.minor = CPT_MINOR_OP_MODEX;
                vq_cmd_w0.s.param2 = exp_len;
        } else {
                if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
                        opcode.s.minor = CPT_MINOR_OP_PKCS_ENC;
                        /* Public key encrypt, use BT2 */
                        vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2 |
                                             ((uint16_t)(exp_len) << 1);
                } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
                        opcode.s.minor = CPT_MINOR_OP_PKCS_DEC;
                        /* Public key decrypt, use BT1 */
                        vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
                        /* + 2 for decrypted len */
                        rlen += 2;
                }
        }

        /* Setup opcodes */
        opcode.s.major = CPT_MAJOR_OP_MODEX;
        vq_cmd_w0.s.opcode = opcode.flags;

        /* GP op header */
        vq_cmd_w0.s.param1 = mod_len;
        vq_cmd_w0.s.dlen = dlen;

        /* Filling cpt_request_info structure */
        req->ist.ei0 = vq_cmd_w0.u64;
        req->ist.ei1 = mphys;
        req->ist.ei2 = mphys + dlen;

        /* Result pointer to store result data */
        req->rptr = dptr;

        /* alternate_caddr to write completion status of the microcode */
        req->alternate_caddr = (uint64_t *)(dptr + rlen);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);

        /* Preparing completion addr, +1 for completion code */
        caddr.vaddr = dptr + rlen + 1;
        caddr.dma_addr = mphys + dlen + rlen + 1;

        cpt_fill_req_comp_addr(req, caddr);
}
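
/*
 * Block-type note (PKCS#1 v1.5): block type 01 (BT1) pads with 0xFF bytes
 * and wraps private-key operations, block type 02 (BT2) pads with random
 * bytes and wraps public-key encryption. The public-key path above thus
 * requests BT2 for encrypt and BT1 for verify, which unwraps a block that
 * was formed with the private key.
 */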

static __rte_always_inline void
cpt_rsa_crt_prep(struct asym_op_params *rsa_params,
                 struct rte_crypto_rsa_xform *rsa,
                 rte_crypto_param *crypto_param)
{
        struct cpt_request_info *req = rsa_params->req;
        phys_addr_t mphys = rsa_params->meta_buf;
        uint32_t qInv_len = rsa->qt.qInv.length;
        struct rte_crypto_rsa_op_param rsa_op;
        uint32_t dP_len = rsa->qt.dP.length;
        uint32_t dQ_len = rsa->qt.dQ.length;
        uint32_t p_len = rsa->qt.p.length;
        uint32_t q_len = rsa->qt.q.length;
        uint32_t mod_len = rsa->n.length;
        struct rte_crypto_op **op;
        vq_cmd_word0_t vq_cmd_w0;
        uint64_t total_key_len;
        opcode_info_t opcode;
        uint32_t dlen, rlen;
        uint32_t in_size;
        buf_ptr_t caddr;
        uint8_t *dptr;

        /* Extract the rsa op from params->req->op[1]->asym->rsa */
        op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
        rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
        total_key_len = p_len + q_len + dP_len + dQ_len + qInv_len;

        /* Input buffer */
        dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
        memcpy(dptr, rsa->qt.q.data, total_key_len);
        dptr += total_key_len;

        in_size = crypto_param->length;
        memcpy(dptr, crypto_param->data, in_size);
        dptr += in_size;

        dlen = total_key_len + in_size;

        /* Result buffer */
        rlen = mod_len;

        if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
                /* Use mod_exp operation for no_padding type */
                opcode.s.minor = CPT_MINOR_OP_MODEX_CRT;
        } else {
                if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
                        opcode.s.minor = CPT_MINOR_OP_PKCS_ENC_CRT;
                        /* Private encrypt, use BT1 */
                        vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
                } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
                        opcode.s.minor = CPT_MINOR_OP_PKCS_DEC_CRT;
                        /* Private decrypt, use BT2 */
                        vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2;
                        /* + 2 for decrypted len */
                        rlen += 2;
                }
        }

        /* Setup opcodes */
        opcode.s.major = CPT_MAJOR_OP_MODEX;
        vq_cmd_w0.s.opcode = opcode.flags;

        /* GP op header */
        vq_cmd_w0.s.param1 = mod_len;
        vq_cmd_w0.s.dlen = dlen;

        /* Filling cpt_request_info structure */
        req->ist.ei0 = vq_cmd_w0.u64;
        req->ist.ei1 = mphys;
        req->ist.ei2 = mphys + dlen;

        /* Result pointer to store result data */
        req->rptr = dptr;

        /* alternate_caddr to write completion status of the microcode */
        req->alternate_caddr = (uint64_t *)(dptr + rlen);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);

        /* Preparing completion addr, +1 for completion code */
        caddr.vaddr = dptr + rlen + 1;
        caddr.dma_addr = mphys + dlen + rlen + 1;

        cpt_fill_req_comp_addr(req, caddr);
}

static __rte_always_inline int __rte_hot
cpt_enqueue_rsa_op(struct rte_crypto_op *op,
                   struct asym_op_params *params,
                   struct cpt_asym_sess_misc *sess)
{
        struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;

        switch (rsa->op_type) {
        case RTE_CRYPTO_ASYM_OP_VERIFY:
                cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->sign);
                break;
        case RTE_CRYPTO_ASYM_OP_ENCRYPT:
                cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->message);
                break;
        case RTE_CRYPTO_ASYM_OP_SIGN:
                cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->message);
                break;
        case RTE_CRYPTO_ASYM_OP_DECRYPT:
                cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->cipher);
                break;
        default:
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        return 0;
}
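
/*
 * Dispatch summary: operations that use the public key (encrypt, verify)
 * go through cpt_rsa_prep() with the (n, e) pair, while private-key
 * operations (sign, decrypt) go through cpt_rsa_crt_prep() with the CRT
 * quintuple.
 */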

static const struct cpt_ec_group ec_grp[CPT_EC_ID_PMAX] = {
        /* P-192 */
        {
                .prime = {
                        .data = {
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
                        },
                        .length = 24,
                },
                .order = {
                        .data = {
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0x99, 0xDE, 0xF8, 0x36,
                                0x14, 0x6B, 0xC9, 0xB1, 0xB4, 0xD2, 0x28, 0x31
                        },
                        .length = 24,
                },
        },
        /* P-224 */
        {
                .prime = {
                        .data = {
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00, 0x01
                        },
                        .length = 28,
                },
                .order = {
                        .data = {
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x16, 0xA2,
                                0xE0, 0xB8, 0xF0, 0x3E, 0x13, 0xDD, 0x29, 0x45,
                                0x5C, 0x5C, 0x2A, 0x3D
                        },
                        .length = 28,
                },
        },
        /* P-256 */
        {
                .prime = {
                        .data = {
                                0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01,
                                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
                        },
                        .length = 32,
                },
                .order = {
                        .data = {
                                0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84,
                                0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x51
                        },
                        .length = 32,
                },
        },
        /* P-384 */
        {
                .prime = {
                        .data = {
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
                                0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF
                        },
                        .length = 48,
                },
                .order = {
                        .data = {
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xC7, 0x63, 0x4D, 0x81, 0xF4, 0x37, 0x2D, 0xDF,
                                0x58, 0x1A, 0x0D, 0xB2, 0x48, 0xB0, 0xA7, 0x7A,
                                0xEC, 0xEC, 0x19, 0x6A, 0xCC, 0xC5, 0x29, 0x73
                        },
                        .length = 48,
                },
        },
        /* P-521 */
        {
                .prime = {
                        .data = {
                                0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF
                        },
                        .length = 66,
                },
                .order = {
                        .data = {
                                0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                                0xFF, 0xFA, 0x51, 0x86, 0x87, 0x83, 0xBF, 0x2F,
                                0x96, 0x6B, 0x7F, 0xCC, 0x01, 0x48, 0xF7, 0x09,
                                0xA5, 0xD0, 0x3B, 0xB5, 0xC9, 0xB8, 0x89, 0x9C,
                                0x47, 0xAE, 0xBB, 0x6F, 0xB7, 0x1E, 0x91, 0x38,
                                0x64, 0x09
                        },
                        .length = 66,
                },
        },
};
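
/*
 * The table above holds the field prime p and the group order n of the
 * five supported NIST curves (FIPS 186-4), as big-endian byte strings
 * indexed by CPT_EC_ID_*. They are copied verbatim into the input buffer
 * of each EC request below.
 */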

static __rte_always_inline void
cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
                    struct asym_op_params *ecdsa_params,
                    uint64_t fpm_table_iova,
                    uint8_t curveid)
{
        struct cpt_request_info *req = ecdsa_params->req;
        uint16_t message_len = ecdsa->message.length;
        phys_addr_t mphys = ecdsa_params->meta_buf;
        uint16_t pkey_len = ecdsa->pkey.length;
        uint16_t p_align, k_align, m_align;
        uint16_t k_len = ecdsa->k.length;
        uint16_t order_len, prime_len;
        uint16_t o_offset, pk_offset;
        vq_cmd_word0_t vq_cmd_w0;
        opcode_info_t opcode;
        uint16_t rlen, dlen;
        buf_ptr_t caddr;
        uint8_t *dptr;

        prime_len = ec_grp[curveid].prime.length;
        order_len = ec_grp[curveid].order.length;

        /* Truncate input length to curve prime length */
        if (message_len > prime_len)
                message_len = prime_len;
        m_align = ROUNDUP8(message_len);

        p_align = ROUNDUP8(prime_len);
        k_align = ROUNDUP8(k_len);

        /* Set write offsets for order and private key */
        o_offset = prime_len - order_len;
        pk_offset = prime_len - pkey_len;

        /* Input buffer */
        dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));

        /*
         * Set dlen = sum(sizeof(fpm address), ROUNDUP8(scalar len, input len),
         * ROUNDUP8(priv key len, prime len, order len)).
         * Note that the private key and order cannot exceed the prime
         * length, i.e. 3 * p_align.
         */
        dlen = sizeof(fpm_table_iova) + k_align + m_align + p_align * 3;

        memset(dptr, 0, dlen);

        *(uint64_t *)dptr = fpm_table_iova;
        dptr += sizeof(fpm_table_iova);

        memcpy(dptr, ecdsa->k.data, k_len);
        dptr += k_align;

        memcpy(dptr, ec_grp[curveid].prime.data, prime_len);
        dptr += p_align;

        memcpy(dptr + o_offset, ec_grp[curveid].order.data, order_len);
        dptr += p_align;

        memcpy(dptr + pk_offset, ecdsa->pkey.data, pkey_len);
        dptr += p_align;

        memcpy(dptr, ecdsa->message.data, message_len);
        dptr += m_align;

        /* 2 * prime length (for sign r and s) */
        rlen = 2 * p_align;

        /* Setup opcodes */
        opcode.s.major = CPT_MAJOR_OP_ECDSA;
        opcode.s.minor = CPT_MINOR_OP_ECDSA_SIGN;
        vq_cmd_w0.s.opcode = opcode.flags;

        /* GP op header */
        vq_cmd_w0.s.param1 = curveid | (message_len << 8);
        vq_cmd_w0.s.param2 = k_len;
        vq_cmd_w0.s.dlen = dlen;

        /* Filling cpt_request_info structure */
        req->ist.ei0 = vq_cmd_w0.u64;
        req->ist.ei1 = mphys;
        req->ist.ei2 = mphys + dlen;

        /* Result pointer to store result data */
        req->rptr = dptr;

        /* alternate_caddr to write completion status of the microcode */
        req->alternate_caddr = (uint64_t *)(dptr + rlen);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);

        /* Preparing completion addr, +1 for completion code */
        caddr.vaddr = dptr + rlen + 1;
        caddr.dma_addr = mphys + dlen + rlen + 1;

        cpt_fill_req_comp_addr(req, caddr);
}
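
/*
 * Sign input buffer as assembled above (zeroed first; the order and the
 * private key are right-aligned within a padded prime-length slot):
 *
 *   | fpm_table_iova | k | prime | order | pkey | message |
 *
 * The microcode returns the signature as r and s, one padded prime length
 * each, hence rlen = 2 * p_align.
 */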

static __rte_always_inline void
cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
                      struct asym_op_params *ecdsa_params,
                      uint64_t fpm_table_iova,
                      uint8_t curveid)
{
        struct cpt_request_info *req = ecdsa_params->req;
        uint32_t message_len = ecdsa->message.length;
        phys_addr_t mphys = ecdsa_params->meta_buf;
        uint16_t o_offset, r_offset, s_offset;
        uint16_t qx_len = ecdsa->q.x.length;
        uint16_t qy_len = ecdsa->q.y.length;
        uint16_t r_len = ecdsa->r.length;
        uint16_t s_len = ecdsa->s.length;
        uint16_t order_len, prime_len;
        uint16_t qx_offset, qy_offset;
        uint16_t p_align, m_align;
        vq_cmd_word0_t vq_cmd_w0;
        opcode_info_t opcode;
        buf_ptr_t caddr;
        uint16_t dlen;
        uint8_t *dptr;

        prime_len = ec_grp[curveid].prime.length;
        order_len = ec_grp[curveid].order.length;

        /* Truncate input length to curve prime length */
        if (message_len > prime_len)
                message_len = prime_len;

        m_align = ROUNDUP8(message_len);
        p_align = ROUNDUP8(prime_len);

        /* Set write offsets for sign, order and public key coordinates */
        o_offset = prime_len - order_len;
        qx_offset = prime_len - qx_len;
        qy_offset = prime_len - qy_len;
        r_offset = prime_len - r_len;
        s_offset = prime_len - s_len;

        /* Input buffer */
        dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));

        /*
         * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
         * ROUNDUP8(sign len (r and s), public key len (x and y coordinates),
         * prime len, order len)).
         * Note that the sign, public key and order cannot exceed the prime
         * length, i.e. 6 * p_align.
         */
        dlen = sizeof(fpm_table_iova) + m_align + (6 * p_align);

        memset(dptr, 0, dlen);

        *(uint64_t *)dptr = fpm_table_iova;
        dptr += sizeof(fpm_table_iova);

        memcpy(dptr + r_offset, ecdsa->r.data, r_len);
        dptr += p_align;

        memcpy(dptr + s_offset, ecdsa->s.data, s_len);
        dptr += p_align;

        memcpy(dptr, ecdsa->message.data, message_len);
        dptr += m_align;

        memcpy(dptr + o_offset, ec_grp[curveid].order.data, order_len);
        dptr += p_align;

        memcpy(dptr, ec_grp[curveid].prime.data, prime_len);
        dptr += p_align;

        memcpy(dptr + qx_offset, ecdsa->q.x.data, qx_len);
        dptr += p_align;

        memcpy(dptr + qy_offset, ecdsa->q.y.data, qy_len);
        dptr += p_align;

        /* Setup opcodes */
        opcode.s.major = CPT_MAJOR_OP_ECDSA;
        opcode.s.minor = CPT_MINOR_OP_ECDSA_VERIFY;
        vq_cmd_w0.s.opcode = opcode.flags;

        /* GP op header */
        vq_cmd_w0.s.param1 = curveid | (message_len << 8);
        vq_cmd_w0.s.param2 = 0;
        vq_cmd_w0.s.dlen = dlen;

        /* Filling cpt_request_info structure */
        req->ist.ei0 = vq_cmd_w0.u64;
        req->ist.ei1 = mphys;
        req->ist.ei2 = mphys + dlen;

        /* Result pointer to store result data */
        req->rptr = dptr;

        /* alternate_caddr to write completion status of the microcode */
        req->alternate_caddr = (uint64_t *)dptr;
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);

        /* Preparing completion addr, +1 for completion code */
        caddr.vaddr = dptr + 1;
        caddr.dma_addr = mphys + dlen + 1;

        cpt_fill_req_comp_addr(req, caddr);
}
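
/*
 * Verify input buffer as assembled above (curve-sized fields are
 * right-aligned within a padded prime-length slot):
 *
 *   | fpm_table_iova | r | s | message | order | prime | Qx | Qy |
 *
 * Verification produces no result data; only the microcode status word at
 * alternate_caddr is consulted.
 */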

static __rte_always_inline int __rte_hot
cpt_enqueue_ecdsa_op(struct rte_crypto_op *op,
                     struct asym_op_params *params,
                     struct cpt_asym_sess_misc *sess,
                     uint64_t *fpm_iova)
{
        struct rte_crypto_ecdsa_op_param *ecdsa = &op->asym->ecdsa;
        uint8_t curveid = sess->ec_ctx.curveid;

        if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_SIGN)
                cpt_ecdsa_sign_prep(ecdsa, params, fpm_iova[curveid], curveid);
        else if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
                cpt_ecdsa_verify_prep(ecdsa, params, fpm_iova[curveid],
                                      curveid);
        else {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        return 0;
}

static __rte_always_inline int
cpt_ecpm_prep(struct rte_crypto_ecpm_op_param *ecpm,
              struct asym_op_params *asym_params,
              uint8_t curveid)
{
        struct cpt_request_info *req = asym_params->req;
        phys_addr_t mphys = asym_params->meta_buf;
        uint16_t x1_len = ecpm->p.x.length;
        uint16_t y1_len = ecpm->p.y.length;
        uint16_t scalar_align, p_align;
        uint16_t dlen, rlen, prime_len;
        uint16_t x1_offset, y1_offset;
        vq_cmd_word0_t vq_cmd_w0;
        opcode_info_t opcode;
        buf_ptr_t caddr;
        uint8_t *dptr;

        prime_len = ec_grp[curveid].prime.length;

        /* Input buffer */
        dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));

        p_align = ROUNDUP8(prime_len);
        scalar_align = ROUNDUP8(ecpm->scalar.length);

        /*
         * Set dlen = sum(ROUNDUP8(input point (x and y coordinates), prime,
         * scalar length)).
         * Note that the point length is equivalent to the curve prime length.
         */
        dlen = 3 * p_align + scalar_align;

        x1_offset = prime_len - x1_len;
        y1_offset = prime_len - y1_len;

        memset(dptr, 0, dlen);

        /* Copy input point, scalar, prime */
        memcpy(dptr + x1_offset, ecpm->p.x.data, x1_len);
        dptr += p_align;
        memcpy(dptr + y1_offset, ecpm->p.y.data, y1_len);
        dptr += p_align;
        memcpy(dptr, ecpm->scalar.data, ecpm->scalar.length);
        dptr += scalar_align;
        memcpy(dptr, ec_grp[curveid].prime.data, ec_grp[curveid].prime.length);
        dptr += p_align;

        /* Setup opcodes */
        opcode.s.major = CPT_MAJOR_OP_ECC;
        opcode.s.minor = CPT_MINOR_OP_ECC_UMP;

        /* GP op header */
        vq_cmd_w0.s.opcode = opcode.flags;
        vq_cmd_w0.s.param1 = curveid;
        vq_cmd_w0.s.param2 = ecpm->scalar.length;
        vq_cmd_w0.s.dlen = dlen;

        /* Filling cpt_request_info structure */
        req->ist.ei0 = vq_cmd_w0.u64;
        req->ist.ei1 = mphys;
        req->ist.ei2 = mphys + dlen;

        /*
         * The result buffer stores the output point, where each coordinate
         * is of prime length, so set rlen to twice the prime length.
         */
        rlen = p_align << 1;
        req->rptr = dptr;

        /* alternate_caddr to write completion status by the microcode */
        req->alternate_caddr = (uint64_t *)(dptr + rlen);
        *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);

        /* Preparing completion addr, +1 for completion code */
        caddr.vaddr = dptr + rlen + 1;
        caddr.dma_addr = mphys + dlen + rlen + 1;

        cpt_fill_req_comp_addr(req, caddr);
        return 0;
}
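
/*
 * ECPM input buffer as assembled above (coordinates right-aligned within
 * a padded prime-length slot):
 *
 *   | Px | Py | scalar | prime |
 *
 * The result is the output point (x, y), one padded prime length per
 * coordinate, hence rlen = 2 * p_align.
 */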

#endif /* _CPT_UCODE_ASYM_H_ */