1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2019 Marvell International Ltd.
5 #ifndef _CPT_UCODE_ASYM_H_
6 #define _CPT_UCODE_ASYM_H_
8 #include <rte_common.h>
9 #include <rte_crypto_asym.h>
10 #include <rte_malloc.h>
12 #include "cpt_common.h"
13 #include "cpt_hw_types.h"
14 #include "cpt_mcode_defines.h"
/*
 * Normalize a big-endian multi-precision integer given as (*data, *len):
 * strip leading zero bytes so the value starts at its first significant
 * byte. Callers (modex/RSA session setup) rely on this so that the
 * lengths programmed into the microcode reflect significant bytes only.
 *
 * NOTE(review): part of this body is elided in this view; the visible
 * loop scans for the first non-zero byte.
 */
static __rte_always_inline void
cpt_modex_param_normalize(uint8_t **data, size_t *len)

	/* Strip leading NUL bytes */

	for (i = 0; i < *len; i++) {
32 static __rte_always_inline int
33 cpt_fill_modex_params(struct cpt_asym_sess_misc *sess,
34 struct rte_crypto_asym_xform *xform)
36 struct rte_crypto_modex_xform *ctx = &sess->mod_ctx;
37 size_t exp_len = xform->modex.exponent.length;
38 size_t mod_len = xform->modex.modulus.length;
39 uint8_t *exp = xform->modex.exponent.data;
40 uint8_t *mod = xform->modex.modulus.data;
42 cpt_modex_param_normalize(&mod, &mod_len);
43 cpt_modex_param_normalize(&exp, &exp_len);
45 if (unlikely(exp_len == 0 || mod_len == 0))
48 if (unlikely(exp_len > mod_len)) {
49 CPT_LOG_DP_ERR("Exponent length greater than modulus length is not supported");
53 /* Allocate buffer to hold modexp params */
54 ctx->modulus.data = rte_malloc(NULL, mod_len + exp_len, 0);
55 if (ctx->modulus.data == NULL) {
56 CPT_LOG_DP_ERR("Could not allocate buffer for modex params");
60 /* Set up modexp prime modulus and private exponent */
62 memcpy(ctx->modulus.data, mod, mod_len);
63 ctx->exponent.data = ctx->modulus.data + mod_len;
64 memcpy(ctx->exponent.data, exp, exp_len);
66 ctx->modulus.length = mod_len;
67 ctx->exponent.length = exp_len;
72 static __rte_always_inline int
73 cpt_fill_rsa_params(struct cpt_asym_sess_misc *sess,
74 struct rte_crypto_asym_xform *xform)
76 struct rte_crypto_rsa_priv_key_qt qt = xform->rsa.qt;
77 struct rte_crypto_rsa_xform *xfrm_rsa = &xform->rsa;
78 struct rte_crypto_rsa_xform *rsa = &sess->rsa_ctx;
79 size_t mod_len = xfrm_rsa->n.length;
80 size_t exp_len = xfrm_rsa->e.length;
84 /* Make sure key length used is not more than mod_len/2 */
85 if (qt.p.data != NULL)
86 len = (((mod_len / 2) < qt.p.length) ? len : qt.p.length);
88 /* Total size required for RSA key params(n,e,(q,dQ,p,dP,qInv)) */
89 total_size = mod_len + exp_len + 5 * len;
91 /* Allocate buffer to hold all RSA keys */
92 rsa->n.data = rte_malloc(NULL, total_size, 0);
93 if (rsa->n.data == NULL) {
94 CPT_LOG_DP_ERR("Could not allocate buffer for RSA keys");
98 /* Set up RSA prime modulus and public key exponent */
99 memcpy(rsa->n.data, xfrm_rsa->n.data, mod_len);
100 rsa->e.data = rsa->n.data + mod_len;
101 memcpy(rsa->e.data, xfrm_rsa->e.data, exp_len);
103 /* Private key in quintuple format */
105 rsa->qt.q.data = rsa->e.data + exp_len;
106 memcpy(rsa->qt.q.data, qt.q.data, qt.q.length);
107 rsa->qt.dQ.data = rsa->qt.q.data + qt.q.length;
108 memcpy(rsa->qt.dQ.data, qt.dQ.data, qt.dQ.length);
109 rsa->qt.p.data = rsa->qt.dQ.data + qt.dQ.length;
110 memcpy(rsa->qt.p.data, qt.p.data, qt.p.length);
111 rsa->qt.dP.data = rsa->qt.p.data + qt.p.length;
112 memcpy(rsa->qt.dP.data, qt.dP.data, qt.dP.length);
113 rsa->qt.qInv.data = rsa->qt.dP.data + qt.dP.length;
114 memcpy(rsa->qt.qInv.data, qt.qInv.data, qt.qInv.length);
116 rsa->qt.q.length = qt.q.length;
117 rsa->qt.dQ.length = qt.dQ.length;
118 rsa->qt.p.length = qt.p.length;
119 rsa->qt.dP.length = qt.dP.length;
120 rsa->qt.qInv.length = qt.qInv.length;
122 rsa->n.length = mod_len;
123 rsa->e.length = exp_len;
128 static __rte_always_inline int
129 cpt_fill_asym_session_parameters(struct cpt_asym_sess_misc *sess,
130 struct rte_crypto_asym_xform *xform)
134 sess->xfrm_type = xform->xform_type;
136 switch (xform->xform_type) {
137 case RTE_CRYPTO_ASYM_XFORM_RSA:
138 ret = cpt_fill_rsa_params(sess, xform);
140 case RTE_CRYPTO_ASYM_XFORM_MODEX:
141 ret = cpt_fill_modex_params(sess, xform);
144 CPT_LOG_DP_ERR("Unsupported transform type");
150 static __rte_always_inline void
151 cpt_free_asym_session_parameters(struct cpt_asym_sess_misc *sess)
153 struct rte_crypto_modex_xform *mod;
154 struct rte_crypto_rsa_xform *rsa;
156 switch (sess->xfrm_type) {
157 case RTE_CRYPTO_ASYM_XFORM_RSA:
158 rsa = &sess->rsa_ctx;
160 rte_free(rsa->n.data);
162 case RTE_CRYPTO_ASYM_XFORM_MODEX:
163 mod = &sess->mod_ctx;
164 if (mod->modulus.data)
165 rte_free(mod->modulus.data);
168 CPT_LOG_DP_ERR("Invalid transform type");
173 static __rte_always_inline void
174 cpt_fill_req_comp_addr(struct cpt_request_info *req, buf_ptr_t addr)
176 void *completion_addr = RTE_PTR_ALIGN(addr.vaddr, 16);
178 /* Pointer to cpt_res_s, updated by CPT */
179 req->completion_addr = (volatile uint64_t *)completion_addr;
180 req->comp_baddr = addr.dma_addr +
181 RTE_PTR_DIFF(completion_addr, addr.vaddr);
182 *(req->completion_addr) = COMPLETION_CODE_INIT;
185 static __rte_always_inline int
186 cpt_modex_prep(struct asym_op_params *modex_params,
187 struct rte_crypto_modex_xform *mod)
189 struct cpt_request_info *req = modex_params->req;
190 phys_addr_t mphys = modex_params->meta_buf;
191 uint32_t exp_len = mod->exponent.length;
192 uint32_t mod_len = mod->modulus.length;
193 struct rte_crypto_mod_op_param mod_op;
194 struct rte_crypto_op **op;
195 vq_cmd_word0_t vq_cmd_w0;
196 uint64_t total_key_len;
197 opcode_info_t opcode;
203 /* Extracting modex op form params->req->op[1]->asym->modex */
204 op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
205 mod_op = ((struct rte_crypto_op *)*op)->asym->modex;
207 base_len = mod_op.base.length;
208 if (unlikely(base_len > mod_len)) {
209 CPT_LOG_DP_ERR("Base length greater than modulus length is not supported");
210 (*op)->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
214 total_key_len = mod_len + exp_len;
217 dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
218 memcpy(dptr, mod->modulus.data, total_key_len);
219 dptr += total_key_len;
220 memcpy(dptr, mod_op.base.data, base_len);
222 dlen = total_key_len + base_len;
228 opcode.s.major = CPT_MAJOR_OP_MODEX;
229 opcode.s.minor = CPT_MINOR_OP_MODEX;
230 vq_cmd_w0.s.opcode = opcode.flags;
233 vq_cmd_w0.s.param1 = mod_len;
234 vq_cmd_w0.s.param2 = exp_len;
235 vq_cmd_w0.s.dlen = dlen;
237 /* Filling cpt_request_info structure */
238 req->ist.ei0 = vq_cmd_w0.u64;
239 req->ist.ei1 = mphys;
240 req->ist.ei2 = mphys + dlen;
242 /* Result pointer to store result data */
245 /* alternate_caddr to write completion status of the microcode */
246 req->alternate_caddr = (uint64_t *)(dptr + rlen);
247 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
249 /* Preparing completion addr, +1 for completion code */
250 caddr.vaddr = dptr + rlen + 1;
251 caddr.dma_addr = mphys + dlen + rlen + 1;
253 cpt_fill_req_comp_addr(req, caddr);
257 static __rte_always_inline void
258 cpt_rsa_prep(struct asym_op_params *rsa_params,
259 struct rte_crypto_rsa_xform *rsa,
260 rte_crypto_param *crypto_param)
262 struct cpt_request_info *req = rsa_params->req;
263 phys_addr_t mphys = rsa_params->meta_buf;
264 struct rte_crypto_rsa_op_param rsa_op;
265 uint32_t mod_len = rsa->n.length;
266 uint32_t exp_len = rsa->e.length;
267 struct rte_crypto_op **op;
268 vq_cmd_word0_t vq_cmd_w0;
269 uint64_t total_key_len;
270 opcode_info_t opcode;
276 /* Extracting rsa op form params->req->op[1]->asym->rsa */
277 op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
278 rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
279 total_key_len = mod_len + exp_len;
282 dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
283 memcpy(dptr, rsa->n.data, total_key_len);
284 dptr += total_key_len;
286 in_size = crypto_param->length;
287 memcpy(dptr, crypto_param->data, in_size);
290 dlen = total_key_len + in_size;
295 if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
296 /* Use mod_exp operation for no_padding type */
297 opcode.s.minor = CPT_MINOR_OP_MODEX;
298 vq_cmd_w0.s.param2 = exp_len;
300 if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
301 opcode.s.minor = CPT_MINOR_OP_PKCS_ENC;
302 /* Public key encrypt, use BT2*/
303 vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2 |
304 ((uint16_t)(exp_len) << 1);
305 } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
306 opcode.s.minor = CPT_MINOR_OP_PKCS_DEC;
307 /* Public key decrypt, use BT1 */
308 vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
309 /* + 2 for decrypted len */
315 opcode.s.major = CPT_MAJOR_OP_MODEX;
316 vq_cmd_w0.s.opcode = opcode.flags;
319 vq_cmd_w0.s.param1 = mod_len;
320 vq_cmd_w0.s.dlen = dlen;
322 /* Filling cpt_request_info structure */
323 req->ist.ei0 = vq_cmd_w0.u64;
324 req->ist.ei1 = mphys;
325 req->ist.ei2 = mphys + dlen;
327 /* Result pointer to store result data */
330 /* alternate_caddr to write completion status of the microcode */
331 req->alternate_caddr = (uint64_t *)(dptr + rlen);
332 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
334 /* Preparing completion addr, +1 for completion code */
335 caddr.vaddr = dptr + rlen + 1;
336 caddr.dma_addr = mphys + dlen + rlen + 1;
338 cpt_fill_req_comp_addr(req, caddr);
341 static __rte_always_inline void
342 cpt_rsa_crt_prep(struct asym_op_params *rsa_params,
343 struct rte_crypto_rsa_xform *rsa,
344 rte_crypto_param *crypto_param)
346 struct cpt_request_info *req = rsa_params->req;
347 phys_addr_t mphys = rsa_params->meta_buf;
348 uint32_t qInv_len = rsa->qt.qInv.length;
349 struct rte_crypto_rsa_op_param rsa_op;
350 uint32_t dP_len = rsa->qt.dP.length;
351 uint32_t dQ_len = rsa->qt.dQ.length;
352 uint32_t p_len = rsa->qt.p.length;
353 uint32_t q_len = rsa->qt.q.length;
354 uint32_t mod_len = rsa->n.length;
355 struct rte_crypto_op **op;
356 vq_cmd_word0_t vq_cmd_w0;
357 uint64_t total_key_len;
358 opcode_info_t opcode;
364 /* Extracting rsa op form params->req->op[1]->asym->rsa */
365 op = RTE_PTR_ADD(req->op, sizeof(uintptr_t));
366 rsa_op = ((struct rte_crypto_op *)*op)->asym->rsa;
367 total_key_len = p_len + q_len + dP_len + dQ_len + qInv_len;
370 dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
371 memcpy(dptr, rsa->qt.q.data, total_key_len);
372 dptr += total_key_len;
374 in_size = crypto_param->length;
375 memcpy(dptr, crypto_param->data, in_size);
378 dlen = total_key_len + in_size;
383 if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
384 /*Use mod_exp operation for no_padding type */
385 opcode.s.minor = CPT_MINOR_OP_MODEX_CRT;
387 if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
388 opcode.s.minor = CPT_MINOR_OP_PKCS_ENC_CRT;
389 /* Private encrypt, use BT1 */
390 vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
391 } else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
392 opcode.s.minor = CPT_MINOR_OP_PKCS_DEC_CRT;
393 /* Private decrypt, use BT2 */
394 vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2;
395 /* + 2 for decrypted len */
401 opcode.s.major = CPT_MAJOR_OP_MODEX;
402 vq_cmd_w0.s.opcode = opcode.flags;
405 vq_cmd_w0.s.param1 = mod_len;
406 vq_cmd_w0.s.dlen = dlen;
408 /* Filling cpt_request_info structure */
409 req->ist.ei0 = vq_cmd_w0.u64;
410 req->ist.ei1 = mphys;
411 req->ist.ei2 = mphys + dlen;
413 /* Result pointer to store result data */
416 /* alternate_caddr to write completion status of the microcode */
417 req->alternate_caddr = (uint64_t *)(dptr + rlen);
418 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
420 /* Preparing completion addr, +1 for completion code */
421 caddr.vaddr = dptr + rlen + 1;
422 caddr.dma_addr = mphys + dlen + rlen + 1;
424 cpt_fill_req_comp_addr(req, caddr);
427 static __rte_always_inline int __hot
428 cpt_enqueue_rsa_op(struct rte_crypto_op *op,
429 struct asym_op_params *params,
430 struct cpt_asym_sess_misc *sess)
432 struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;
434 switch (rsa->op_type) {
435 case RTE_CRYPTO_ASYM_OP_VERIFY:
436 cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->sign);
438 case RTE_CRYPTO_ASYM_OP_ENCRYPT:
439 cpt_rsa_prep(params, &sess->rsa_ctx, &rsa->message);
441 case RTE_CRYPTO_ASYM_OP_SIGN:
442 cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->message);
444 case RTE_CRYPTO_ASYM_OP_DECRYPT:
445 cpt_rsa_crt_prep(params, &sess->rsa_ctx, &rsa->cipher);
448 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
453 #endif /* _CPT_UCODE_ASYM_H_ */