crypto/qat: add ECDSA algorithm
[dpdk.git] / drivers / crypto / qat / qat_asym.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 - 2022 Intel Corporation
3  */
4
5 #include <stdarg.h>
6
7 #include <cryptodev_pmd.h>
8
9 #include "qat_device.h"
10 #include "qat_logs.h"
11
12 #include "qat_asym.h"
13 #include "icp_qat_fw_pke.h"
14 #include "icp_qat_fw.h"
15 #include "qat_pke.h"
16 #include "qat_ec.h"
17
/* Driver id assigned by the cryptodev framework when this PMD registers. */
uint8_t qat_asym_driver_id;

/* Per-generation crypto dev ops table, indexed by QAT generation.
 * NOTE(review): populated outside this file (gen-specific sources) — confirm.
 */
struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];

/* An rte_driver is needed in the registration of both the device and the driver
 * with cryptodev.
 * The actual qat pci's rte_driver can't be used as its name represents
 * the whole pci device with all services. Think of this as a holder for a name
 * for the crypto part of the pci device.
 */
static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
static const struct rte_driver cryptodev_qat_asym_driver = {
	.name = qat_asym_drv_name,
	.alias = qat_asym_drv_name
};
33
/*
 * Macros with suffix _F are used with some of predefined identifiers:
 * - cookie->input_buffer
 * - qat_alg_bytesize
 */
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
/* Dump `size` bytes starting at `where` under label `name` (debug builds only). */
#define HEXDUMP(name, where, size) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			where, size)
/* Dump the idx-th `size`-byte slot of flat buffer `where`. */
#define HEXDUMP_OFF(name, where, size, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			&where[idx * size], size)

/* Dump the idx-th qat_alg_bytesize slot of cookie->input_buffer. */
#define HEXDUMP_OFF_F(name, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			&cookie->input_buffer[idx * qat_alg_bytesize], \
			qat_alg_bytesize)
#else
/* Hexdump helpers compile out entirely below DEBUG log level. */
#define HEXDUMP(name, where, size)
#define HEXDUMP_OFF(name, where, size, idx)
#define HEXDUMP_OFF_F(name, idx)
#endif

/* Validate an rte_crypto_param: reject zero length or an all-zero value.
 * On failure, logs an error and sets `status` to -EINVAL (does not return).
 */
#define CHECK_IF_NOT_EMPTY(param, name, pname, status) \
	do { \
		if (param.length == 0) {	\
			QAT_LOG(ERR,			\
				"Invalid " name \
				" input parameter, zero length " pname	\
			);	\
			status = -EINVAL;	\
		} else if (check_zero(param)) { \
			QAT_LOG(ERR,	\
				"Invalid " name " input parameter, empty " \
				pname ", length = %d", \
				(int)param.length \
			); \
			status = -EINVAL;	\
		} \
	} while (0)

/* Right-align `what` (big-endian number) into the idx-th line of `where`,
 * each line being `how` bytes wide.
 */
#define SET_PKE_LN(where, what, how, idx) \
	rte_memcpy(where[idx] + how - \
		what.length, \
		what.data, \
		what.length)

/* Same as SET_PKE_LN but for a flat buffer with 8-byte-aligned line stride. */
#define SET_PKE_LN_9A(where, what, how, idx) \
		rte_memcpy(&where[idx * RTE_ALIGN_CEIL(how, 8)] + \
			RTE_ALIGN_CEIL(how, 8) - \
			what.length, \
			what.data, \
			what.length)

/* Copy exactly `how` bytes of an EC curve constant, right-aligned in an
 * 8-byte-aligned line of `where`.
 */
#define SET_PKE_LN_EC(where, what, how, idx) \
		rte_memcpy(&where[idx * RTE_ALIGN_CEIL(how, 8)] + \
			RTE_ALIGN_CEIL(how, 8) - \
			how, \
			what.data, \
			how)

/* _F variants implicitly target cookie->input_buffer / qat_alg_bytesize. */
#define SET_PKE_LN_9A_F(what, idx) \
		rte_memcpy(&cookie->input_buffer[idx * qat_alg_bytesize] + \
			qat_alg_bytesize - what.length, \
			what.data, what.length)

#define SET_PKE_LN_EC_F(what, how, idx) \
		rte_memcpy(&cookie->input_buffer[idx * \
			RTE_ALIGN_CEIL(how, 8)] + \
			RTE_ALIGN_CEIL(how, 8) - how, \
			what.data, how)

103 static void
104 request_init(struct icp_qat_fw_pke_request *qat_req)
105 {
106         memset(qat_req, 0, sizeof(*qat_req));
107         qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
108         qat_req->pke_hdr.hdr_flags =
109                         ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
110                         (ICP_QAT_FW_COMN_REQ_FLAG_SET);
111 }
112
113 static void
114 cleanup_arrays(struct qat_asym_op_cookie *cookie,
115                 int in_count, int out_count, int alg_size)
116 {
117         int i;
118
119         for (i = 0; i < in_count; i++)
120                 memset(cookie->input_array[i], 0x0, alg_size);
121         for (i = 0; i < out_count; i++)
122                 memset(cookie->output_array[i], 0x0, alg_size);
123 }
124
125 static void
126 cleanup_crt(struct qat_asym_op_cookie *cookie,
127                 int alg_size)
128 {
129         int i;
130
131         memset(cookie->input_array[0], 0x0, alg_size);
132         for (i = 1; i < QAT_ASYM_RSA_QT_NUM_IN_PARAMS; i++)
133                 memset(cookie->input_array[i], 0x0, alg_size / 2);
134         for (i = 0; i < QAT_ASYM_RSA_NUM_OUT_PARAMS; i++)
135                 memset(cookie->output_array[i], 0x0, alg_size);
136 }
137
138 static void
139 cleanup(struct qat_asym_op_cookie *cookie,
140                 struct rte_crypto_asym_xform *xform, int alg_size)
141 {
142         if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX)
143                 cleanup_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
144                                 QAT_ASYM_MODEXP_NUM_OUT_PARAMS, alg_size);
145         else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV)
146                 cleanup_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
147                                 QAT_ASYM_MODINV_NUM_OUT_PARAMS, alg_size);
148         else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
149                 if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT)
150                         cleanup_crt(cookie, alg_size);
151                 else {
152                         cleanup_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS,
153                                 QAT_ASYM_RSA_NUM_OUT_PARAMS, alg_size);
154                 }
155         }
156 }
157
158 static int
159 check_zero(rte_crypto_param n)
160 {
161         int i, len = n.length;
162
163         if (len < 8) {
164                 for (i = len - 1; i >= 0; i--) {
165                         if (n.data[i] != 0x0)
166                                 return 0;
167                 }
168         } else if (len == 8 && *(uint64_t *)&n.data[len - 8] == 0) {
169                 return 1;
170         } else if (*(uint64_t *)&n.data[len - 8] == 0) {
171                 for (i = len - 9; i >= 0; i--) {
172                         if (n.data[i] != 0x0)
173                                 return 0;
174                 }
175         } else
176                 return 0;
177
178         return 1;
179 }
180
181 static struct qat_asym_function
182 get_asym_function(struct rte_crypto_asym_xform *xform)
183 {
184         struct qat_asym_function qat_function;
185
186         switch (xform->xform_type) {
187         case RTE_CRYPTO_ASYM_XFORM_MODEX:
188                 qat_function = get_modexp_function(xform);
189                 break;
190         case RTE_CRYPTO_ASYM_XFORM_MODINV:
191                 qat_function = get_modinv_function(xform);
192                 break;
193         default:
194                 qat_function.func_id = 0;
195                 break;
196         }
197
198         return qat_function;
199 }
200
201 static int
202 modexp_set_input(struct rte_crypto_asym_op *asym_op,
203                 struct icp_qat_fw_pke_request *qat_req,
204                 struct qat_asym_op_cookie *cookie,
205                 struct rte_crypto_asym_xform *xform)
206 {
207         struct qat_asym_function qat_function;
208         uint32_t alg_bytesize, func_id;
209         int status = 0;
210
211         CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod exp",
212                         "modulus", status);
213         CHECK_IF_NOT_EMPTY(xform->modex.exponent, "mod exp",
214                                 "exponent", status);
215         if (status)
216                 return status;
217
218         qat_function = get_asym_function(xform);
219         func_id = qat_function.func_id;
220         if (qat_function.func_id == 0) {
221                 QAT_LOG(ERR, "Cannot obtain functionality id");
222                 return -EINVAL;
223         }
224         alg_bytesize = qat_function.bytesize;
225
226         SET_PKE_LN(cookie->input_array, asym_op->modex.base,
227                         alg_bytesize, 0);
228         SET_PKE_LN(cookie->input_array, xform->modex.exponent,
229                         alg_bytesize, 1);
230         SET_PKE_LN(cookie->input_array, xform->modex.modulus,
231                         alg_bytesize, 2);
232
233         cookie->alg_bytesize = alg_bytesize;
234         qat_req->pke_hdr.cd_pars.func_id = func_id;
235         qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
236         qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
237
238         HEXDUMP("ModExp base", cookie->input_array[0], alg_bytesize);
239         HEXDUMP("ModExp exponent", cookie->input_array[1], alg_bytesize);
240         HEXDUMP("ModExp modulus", cookie->input_array[2], alg_bytesize);
241
242         return status;
243 }
244
245 static uint8_t
246 modexp_collect(struct rte_crypto_asym_op *asym_op,
247                 struct qat_asym_op_cookie *cookie,
248                 struct rte_crypto_asym_xform *xform)
249 {
250         rte_crypto_param n = xform->modex.modulus;
251         uint32_t alg_bytesize = cookie->alg_bytesize;
252         uint8_t *modexp_result = asym_op->modex.result.data;
253
254         rte_memcpy(modexp_result,
255                 cookie->output_array[0] + alg_bytesize
256                 - n.length, n.length);
257         HEXDUMP("ModExp result", cookie->output_array[0],
258                         alg_bytesize);
259         return RTE_CRYPTO_OP_STATUS_SUCCESS;
260 }
261
262 static int
263 modinv_set_input(struct rte_crypto_asym_op *asym_op,
264                 struct icp_qat_fw_pke_request *qat_req,
265                 struct qat_asym_op_cookie *cookie,
266                 struct rte_crypto_asym_xform *xform)
267 {
268         struct qat_asym_function qat_function;
269         uint32_t alg_bytesize, func_id;
270         int status = 0;
271
272         CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod inv",
273                         "modulus", status);
274         if (status)
275                 return status;
276
277         qat_function = get_asym_function(xform);
278         func_id = qat_function.func_id;
279         if (func_id == 0) {
280                 QAT_LOG(ERR, "Cannot obtain functionality id");
281                 return -EINVAL;
282         }
283         alg_bytesize = qat_function.bytesize;
284
285         SET_PKE_LN(cookie->input_array, asym_op->modinv.base,
286                         alg_bytesize, 0);
287         SET_PKE_LN(cookie->input_array, xform->modinv.modulus,
288                         alg_bytesize, 1);
289
290         cookie->alg_bytesize = alg_bytesize;
291         qat_req->pke_hdr.cd_pars.func_id = func_id;
292         qat_req->input_param_count =
293                         QAT_ASYM_MODINV_NUM_IN_PARAMS;
294         qat_req->output_param_count =
295                         QAT_ASYM_MODINV_NUM_OUT_PARAMS;
296
297         HEXDUMP("ModInv base", cookie->input_array[0], alg_bytesize);
298         HEXDUMP("ModInv modulus", cookie->input_array[1], alg_bytesize);
299
300         return 0;
301 }
302
303 static uint8_t
304 modinv_collect(struct rte_crypto_asym_op *asym_op,
305                 struct qat_asym_op_cookie *cookie,
306                 struct rte_crypto_asym_xform *xform)
307 {
308         rte_crypto_param n = xform->modinv.modulus;
309         uint8_t *modinv_result = asym_op->modinv.result.data;
310         uint32_t alg_bytesize = cookie->alg_bytesize;
311
312         rte_memcpy(modinv_result + (asym_op->modinv.result.length
313                 - n.length),
314                 cookie->output_array[0] + alg_bytesize
315                 - n.length, n.length);
316         HEXDUMP("ModInv result", cookie->output_array[0],
317                         alg_bytesize);
318         return RTE_CRYPTO_OP_STATUS_SUCCESS;
319 }
320
321 static int
322 rsa_set_pub_input(struct rte_crypto_asym_op *asym_op,
323                 struct icp_qat_fw_pke_request *qat_req,
324                 struct qat_asym_op_cookie *cookie,
325                 struct rte_crypto_asym_xform *xform)
326 {
327         struct qat_asym_function qat_function;
328         uint32_t alg_bytesize, func_id;
329         int status = 0;
330
331         qat_function = get_rsa_enc_function(xform);
332         func_id = qat_function.func_id;
333         if (func_id == 0) {
334                 QAT_LOG(ERR, "Cannot obtain functionality id");
335                 return -EINVAL;
336         }
337         alg_bytesize = qat_function.bytesize;
338
339         if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
340                 switch (asym_op->rsa.pad) {
341                 case RTE_CRYPTO_RSA_PADDING_NONE:
342                         SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
343                                         alg_bytesize, 0);
344                         break;
345                 default:
346                         QAT_LOG(ERR,
347                                 "Invalid RSA padding (Encryption)"
348                                 );
349                         return -EINVAL;
350                 }
351                 HEXDUMP("RSA Message", cookie->input_array[0], alg_bytesize);
352         } else {
353                 switch (asym_op->rsa.pad) {
354                 case RTE_CRYPTO_RSA_PADDING_NONE:
355                         SET_PKE_LN(cookie->input_array, asym_op->rsa.sign,
356                                         alg_bytesize, 0);
357                         break;
358                 default:
359                         QAT_LOG(ERR,
360                                 "Invalid RSA padding (Verify)");
361                         return -EINVAL;
362                 }
363                 HEXDUMP("RSA Signature", cookie->input_array[0],
364                                 alg_bytesize);
365         }
366
367         SET_PKE_LN(cookie->input_array, xform->rsa.e,
368                         alg_bytesize, 1);
369         SET_PKE_LN(cookie->input_array, xform->rsa.n,
370                         alg_bytesize, 2);
371
372         cookie->alg_bytesize = alg_bytesize;
373         qat_req->pke_hdr.cd_pars.func_id = func_id;
374
375         HEXDUMP("RSA Public Key", cookie->input_array[1], alg_bytesize);
376         HEXDUMP("RSA Modulus", cookie->input_array[2], alg_bytesize);
377
378         return status;
379 }
380
/* Fill the PKE request for a private-key RSA operation (decrypt or
 * sign). Supports the quintuple/CRT key form (p, q, dP, dQ, qInv —
 * each half the modulus size) and the plain exponent form (d, n).
 * The ciphertext (decrypt) or message (sign) is placed in input
 * line 0; only no-padding mode is accepted.
 * Returns 0 on success, -EINVAL on bad key type, bad padding, or
 * unknown functionality id.
 */
static int
rsa_set_priv_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT) {
		qat_function = get_rsa_crt_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;
		/* CRT uses more input lines than the default set in
		 * rsa_set_input().
		 */
		qat_req->input_param_count =
				QAT_ASYM_RSA_QT_NUM_IN_PARAMS;

		/* Each CRT component is half the modulus size; line 0 is
		 * filled below with the ciphertext/message.
		 */
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.p,
			(alg_bytesize >> 1), 1);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.q,
			(alg_bytesize >> 1), 2);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.dP,
			(alg_bytesize >> 1), 3);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.dQ,
			(alg_bytesize >> 1), 4);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.qInv,
			(alg_bytesize >> 1), 5);

		/* NOTE(review): dumps the full line although only
		 * alg_bytesize/2 bytes were written above.
		 */
		HEXDUMP("RSA p", cookie->input_array[1],
				alg_bytesize);
		HEXDUMP("RSA q", cookie->input_array[2],
				alg_bytesize);
		HEXDUMP("RSA dP", cookie->input_array[3],
				alg_bytesize);
		HEXDUMP("RSA dQ", cookie->input_array[4],
				alg_bytesize);
		HEXDUMP("RSA qInv", cookie->input_array[5],
				alg_bytesize);
	} else if (xform->rsa.key_type ==
			RTE_RSA_KEY_TYPE_EXP) {
		qat_function = get_rsa_dec_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;

		SET_PKE_LN(cookie->input_array, xform->rsa.d,
			alg_bytesize, 1);
		SET_PKE_LN(cookie->input_array, xform->rsa.n,
			alg_bytesize, 2);

		HEXDUMP("RSA d", cookie->input_array[1],
				alg_bytesize);
		HEXDUMP("RSA n", cookie->input_array[2],
				alg_bytesize);
	} else {
		QAT_LOG(ERR, "Invalid RSA key type");
		return -EINVAL;
	}

	/* Place the operand for this op type into input line 0. */
	if (asym_op->rsa.op_type ==
			RTE_CRYPTO_ASYM_OP_DECRYPT) {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.cipher,
				alg_bytesize, 0);
			HEXDUMP("RSA ciphertext", cookie->input_array[0],
				alg_bytesize);
			break;
		default:
			QAT_LOG(ERR,
				"Invalid padding of RSA (Decrypt)");
			return -(EINVAL);
		}

	} else if (asym_op->rsa.op_type ==
			RTE_CRYPTO_ASYM_OP_SIGN) {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
				alg_bytesize, 0);
			HEXDUMP("RSA text to be signed", cookie->input_array[0],
				alg_bytesize);
			break;
		default:
			QAT_LOG(ERR,
				"Invalid padding of RSA (Signature)");
			return -(EINVAL);
		}
	}

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;
	return status;
}
482
483 static int
484 rsa_set_input(struct rte_crypto_asym_op *asym_op,
485                 struct icp_qat_fw_pke_request *qat_req,
486                 struct qat_asym_op_cookie *cookie,
487                 struct rte_crypto_asym_xform *xform)
488 {
489         qat_req->input_param_count =
490                         QAT_ASYM_RSA_NUM_IN_PARAMS;
491         qat_req->output_param_count =
492                         QAT_ASYM_RSA_NUM_OUT_PARAMS;
493
494         if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
495                         asym_op->rsa.op_type ==
496                                 RTE_CRYPTO_ASYM_OP_VERIFY) {
497                 return rsa_set_pub_input(asym_op, qat_req, cookie, xform);
498         } else {
499                 return rsa_set_priv_input(asym_op, qat_req, cookie, xform);
500         }
501 }
502
503 static uint8_t
504 rsa_collect(struct rte_crypto_asym_op *asym_op,
505                 struct qat_asym_op_cookie *cookie)
506 {
507         uint32_t alg_bytesize = cookie->alg_bytesize;
508
509         if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
510                 asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
511
512                 if (asym_op->rsa.op_type ==
513                                 RTE_CRYPTO_ASYM_OP_ENCRYPT) {
514                         uint8_t *rsa_result = asym_op->rsa.cipher.data;
515
516                         rte_memcpy(rsa_result,
517                                         cookie->output_array[0],
518                                         alg_bytesize);
519                         HEXDUMP("RSA Encrypted data", cookie->output_array[0],
520                                 alg_bytesize);
521                 } else {
522                         uint8_t *rsa_result = asym_op->rsa.cipher.data;
523
524                         switch (asym_op->rsa.pad) {
525                         case RTE_CRYPTO_RSA_PADDING_NONE:
526                                 rte_memcpy(rsa_result,
527                                                 cookie->output_array[0],
528                                                 alg_bytesize);
529                                 HEXDUMP("RSA signature",
530                                         cookie->output_array[0],
531                                         alg_bytesize);
532                                 break;
533                         default:
534                                 QAT_LOG(ERR, "Padding not supported");
535                                 return RTE_CRYPTO_OP_STATUS_ERROR;
536                         }
537                 }
538         } else {
539                 if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
540                         uint8_t *rsa_result = asym_op->rsa.message.data;
541
542                         switch (asym_op->rsa.pad) {
543                         case RTE_CRYPTO_RSA_PADDING_NONE:
544                                 rte_memcpy(rsa_result,
545                                         cookie->output_array[0],
546                                         alg_bytesize);
547                                 HEXDUMP("RSA Decrypted Message",
548                                         cookie->output_array[0],
549                                         alg_bytesize);
550                                 break;
551                         default:
552                                 QAT_LOG(ERR, "Padding not supported");
553                                 return RTE_CRYPTO_OP_STATUS_ERROR;
554                         }
555                 } else {
556                         uint8_t *rsa_result = asym_op->rsa.sign.data;
557
558                         rte_memcpy(rsa_result,
559                                         cookie->output_array[0],
560                                         alg_bytesize);
561                         HEXDUMP("RSA Signature", cookie->output_array[0],
562                                 alg_bytesize);
563                 }
564         }
565         return RTE_CRYPTO_OP_STATUS_SUCCESS;
566 }
567
568 static int
569 ecdsa_set_input(struct rte_crypto_asym_op *asym_op,
570                 struct icp_qat_fw_pke_request *qat_req,
571                 struct qat_asym_op_cookie *cookie,
572                 struct rte_crypto_asym_xform *xform)
573 {
574         struct qat_asym_function qat_function;
575         uint32_t alg_bytesize, qat_alg_bytesize, func_id;
576         int curve_id;
577
578         curve_id = pick_curve(xform);
579         if (curve_id < 0) {
580                 QAT_LOG(ERR, "Incorrect elliptic curve");
581                 return -EINVAL;
582         }
583
584         switch (asym_op->ecdsa.op_type) {
585         case RTE_CRYPTO_ASYM_OP_SIGN:
586                 qat_function = get_ecdsa_function(xform);
587                 func_id = qat_function.func_id;
588                 if (func_id == 0) {
589                         QAT_LOG(ERR, "Cannot obtain functionality id");
590                         return -EINVAL;
591                 }
592                 alg_bytesize = qat_function.bytesize;
593                 qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);
594
595                 SET_PKE_LN_9A_F(asym_op->ecdsa.pkey, 0);
596                 SET_PKE_LN_9A_F(asym_op->ecdsa.message, 1);
597                 SET_PKE_LN_9A_F(asym_op->ecdsa.k, 2);
598                 SET_PKE_LN_EC_F(curve[curve_id].b, alg_bytesize, 3);
599                 SET_PKE_LN_EC_F(curve[curve_id].a, alg_bytesize, 4);
600                 SET_PKE_LN_EC_F(curve[curve_id].p, alg_bytesize, 5);
601                 SET_PKE_LN_EC_F(curve[curve_id].n, alg_bytesize, 6);
602                 SET_PKE_LN_EC_F(curve[curve_id].y, alg_bytesize, 7);
603                 SET_PKE_LN_EC_F(curve[curve_id].x, alg_bytesize, 8);
604
605                 cookie->alg_bytesize = alg_bytesize;
606                 qat_req->pke_hdr.cd_pars.func_id = func_id;
607                 qat_req->input_param_count =
608                                 QAT_ASYM_ECDSA_RS_SIGN_IN_PARAMS;
609                 qat_req->output_param_count =
610                                 QAT_ASYM_ECDSA_RS_SIGN_OUT_PARAMS;
611
612                 HEXDUMP_OFF_F("ECDSA d", 0);
613                 HEXDUMP_OFF_F("ECDSA e", 1);
614                 HEXDUMP_OFF_F("ECDSA k", 2);
615                 HEXDUMP_OFF_F("ECDSA b", 3);
616                 HEXDUMP_OFF_F("ECDSA a", 4);
617                 HEXDUMP_OFF_F("ECDSA n", 5);
618                 HEXDUMP_OFF_F("ECDSA y", 6);
619                 HEXDUMP_OFF_F("ECDSA x", 7);
620                 break;
621         case RTE_CRYPTO_ASYM_OP_VERIFY:
622                 qat_function = get_ecdsa_verify_function(xform);
623                 func_id = qat_function.func_id;
624                 if (func_id == 0) {
625                         QAT_LOG(ERR, "Cannot obtain functionality id");
626                         return -EINVAL;
627                 }
628                 alg_bytesize = qat_function.bytesize;
629                 qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);
630
631                 SET_PKE_LN_9A_F(asym_op->ecdsa.message, 10);
632                 SET_PKE_LN_9A_F(asym_op->ecdsa.s, 9);
633                 SET_PKE_LN_9A_F(asym_op->ecdsa.r, 8);
634                 SET_PKE_LN_EC_F(curve[curve_id].n, alg_bytesize, 7);
635                 SET_PKE_LN_EC_F(curve[curve_id].x, alg_bytesize, 6);
636                 SET_PKE_LN_EC_F(curve[curve_id].y, alg_bytesize, 5);
637                 SET_PKE_LN_9A_F(asym_op->ecdsa.q.x, 4);
638                 SET_PKE_LN_9A_F(asym_op->ecdsa.q.y, 3);
639                 SET_PKE_LN_EC_F(curve[curve_id].a, alg_bytesize, 2);
640                 SET_PKE_LN_EC_F(curve[curve_id].b, alg_bytesize, 1);
641                 SET_PKE_LN_EC_F(curve[curve_id].p, alg_bytesize, 0);
642
643                 cookie->alg_bytesize = alg_bytesize;
644                 qat_req->pke_hdr.cd_pars.func_id = func_id;
645                 qat_req->input_param_count =
646                                 QAT_ASYM_ECDSA_RS_VERIFY_IN_PARAMS;
647                 qat_req->output_param_count =
648                                 QAT_ASYM_ECDSA_RS_VERIFY_OUT_PARAMS;
649
650                 HEXDUMP_OFF_F("e", 0);
651                 HEXDUMP_OFF_F("s", 1);
652                 HEXDUMP_OFF_F("r", 2);
653                 HEXDUMP_OFF_F("n", 3);
654                 HEXDUMP_OFF_F("xG", 4);
655                 HEXDUMP_OFF_F("yG", 5);
656                 HEXDUMP_OFF_F("xQ", 6);
657                 HEXDUMP_OFF_F("yQ", 7);
658                 HEXDUMP_OFF_F("a", 8);
659                 HEXDUMP_OFF_F("b", 9);
660                 HEXDUMP_OFF_F("q", 10);
661                 break;
662         default:
663                 return -1;
664         }
665
666         return 0;
667 }
668
669 static uint8_t
670 ecdsa_collect(struct rte_crypto_asym_op *asym_op,
671                 struct qat_asym_op_cookie *cookie)
672 {
673         uint32_t alg_bytesize = RTE_ALIGN_CEIL(cookie->alg_bytesize, 8);
674
675         if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
676                 uint8_t *r = asym_op->ecdsa.r.data;
677                 uint8_t *s = asym_op->ecdsa.s.data;
678
679                 asym_op->ecdsa.r.length = alg_bytesize;
680                 asym_op->ecdsa.s.length = alg_bytesize;
681                 rte_memcpy(r, cookie->output_array[0], alg_bytesize);
682                 rte_memcpy(s, cookie->output_array[1], alg_bytesize);
683                 HEXDUMP("R", cookie->output_array[0],
684                         alg_bytesize);
685                 HEXDUMP("S", cookie->output_array[1],
686                         alg_bytesize);
687         }
688         return RTE_CRYPTO_OP_STATUS_SUCCESS;
689 }
690
691 static int
692 asym_set_input(struct rte_crypto_asym_op *asym_op,
693                 struct icp_qat_fw_pke_request *qat_req,
694                 struct qat_asym_op_cookie *cookie,
695                 struct rte_crypto_asym_xform *xform)
696 {
697         switch (xform->xform_type) {
698         case RTE_CRYPTO_ASYM_XFORM_MODEX:
699                 return modexp_set_input(asym_op, qat_req,
700                                 cookie, xform);
701         case RTE_CRYPTO_ASYM_XFORM_MODINV:
702                 return modinv_set_input(asym_op, qat_req,
703                                 cookie, xform);
704         case RTE_CRYPTO_ASYM_XFORM_RSA:
705                 return rsa_set_input(asym_op, qat_req,
706                                 cookie, xform);
707         case RTE_CRYPTO_ASYM_XFORM_ECDSA:
708                 return ecdsa_set_input(asym_op, qat_req,
709                                 cookie, xform);
710         default:
711                 QAT_LOG(ERR, "Invalid/unsupported asymmetric crypto xform");
712                 return -EINVAL;
713         }
714         return 1;
715 }
716
/* Build one PKE firmware request from a sessionless asymmetric crypto
 * op. On any failure the request is turned into a NULL request (zero
 * parameter counts, NULL service type) so the firmware completes it
 * without processing, and the error is latched in cookie->error;
 * the function itself always returns 0 so enqueue proceeds.
 */
static int
qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
			__rte_unused uint64_t *opaque,
			__rte_unused enum qat_device_gen qat_dev_gen)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct icp_qat_fw_pke_request *qat_req =
			(struct icp_qat_fw_pke_request *)out_msg;
	struct qat_asym_op_cookie *cookie =
			(struct qat_asym_op_cookie *)op_cookie;
	int err = 0;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		/* Only sessionless ops are supported by this PMD. */
		QAT_LOG(ERR,
			"QAT asymmetric crypto PMD does not support session"
			);
		goto error;
	case RTE_CRYPTO_OP_SESSIONLESS:
		request_init(qat_req);
		err = asym_set_input(asym_op, qat_req, cookie,
				op->asym->xform);
		if (err) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			goto error;
		}
		break;
	default:
		QAT_DP_LOG(ERR, "Invalid session/xform settings");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		goto error;
	}

	/* The op pointer rides in the opaque field so the response
	 * handler can recover it.
	 */
	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	qat_req->pke_mid.src_data_addr = cookie->input_addr;
	qat_req->pke_mid.dest_data_addr = cookie->output_addr;

	HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));

	return 0;
error:
	/* Send a NULL request: zero params, NULL service type; the
	 * firmware returns it unprocessed and the error surfaces in the
	 * response path via cookie->error.
	 */
	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));
	qat_req->output_param_count = 0;
	qat_req->input_param_count = 0;
	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
	cookie->error |= err;

	return 0;
}
769
770 static uint8_t
771 qat_asym_collect_response(struct rte_crypto_op *rx_op,
772                 struct qat_asym_op_cookie *cookie,
773                 struct rte_crypto_asym_xform *xform)
774 {
775         struct rte_crypto_asym_op *asym_op = rx_op->asym;
776
777         switch (xform->xform_type) {
778         case RTE_CRYPTO_ASYM_XFORM_MODEX:
779                 return modexp_collect(asym_op, cookie, xform);
780         case RTE_CRYPTO_ASYM_XFORM_MODINV:
781                 return modinv_collect(asym_op, cookie, xform);
782         case RTE_CRYPTO_ASYM_XFORM_RSA:
783                 return rsa_collect(asym_op, cookie);
784         case RTE_CRYPTO_ASYM_XFORM_ECDSA:
785                 return ecdsa_collect(asym_op, cookie);
786         default:
787                 QAT_LOG(ERR, "Not supported xform type");
788                 return  RTE_CRYPTO_OP_STATUS_ERROR;
789         }
790 }
791
/*
 * Process one firmware PKE response: map it back to the originating
 * rte_crypto_op, fold build-time and device error indications into the op
 * status and, when no error was flagged, copy device output into the op via
 * the per-algorithm collector. Returns 1 (one op consumed per response).
 */
static int
qat_asym_process_response(void **op, uint8_t *resp,
		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_pke_resp *resp_msg =
			(struct icp_qat_fw_pke_resp *)resp;
	/* opaque was set to the op pointer in qat_asym_build_request(). */
	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque);
	struct qat_asym_op_cookie *cookie = op_cookie;

	/* A build-time error (cookie->error) takes precedence over any
	 * device-reported status; the flag is consumed (reset) here.
	 */
	if (cookie->error) {
		cookie->error = 0;
		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		QAT_DP_LOG(ERR, "Cookie status returned error");
	} else {
		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric response status"
					" returned error");
		}
		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric common status"
					" returned error");
		}
	}
	/* Only collect output when no error path above changed the status. */
	if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
		rx_op->status = qat_asym_collect_response(rx_op,
					cookie, rx_op->asym->xform);
		cleanup(cookie, rx_op->asym->xform,
					cookie->alg_bytesize);
	}

	*op = rx_op;
	HEXDUMP("resp_msg:", resp_msg, sizeof(struct icp_qat_fw_pke_resp));

	return 1;
}
834
/* Session configure stub: asymmetric sessions are not supported by this
 * PMD, so this always logs an error and returns -ENOTSUP.
 */
int
qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
		struct rte_crypto_asym_xform *xform __rte_unused,
		struct rte_cryptodev_asym_session *sess __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
	return -ENOTSUP;
}
843
/* Session private-size stub: sessions are unsupported, so the required
 * private data size is reported as 0.
 */
unsigned int
qat_asym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
	return 0;
}
850
/* Session clear stub: nothing to release since sessions are unsupported. */
void
qat_asym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_asym_session *sess __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
}
857
/* Enqueue burst entry point: delegates to the common QAT ring enqueue,
 * using qat_asym_build_request to turn each op into a firmware message.
 */
static uint16_t
qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
			nb_ops);
}
865
/* Dequeue burst entry point: delegates to the common QAT ring dequeue,
 * using qat_asym_process_response to turn each response into an op.
 */
static uint16_t
qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
				nb_ops);
}
873
874 void
875 qat_asym_init_op_cookie(void *op_cookie)
876 {
877         int j;
878         struct qat_asym_op_cookie *cookie = op_cookie;
879
880         cookie->input_addr = rte_mempool_virt2iova(cookie) +
881                         offsetof(struct qat_asym_op_cookie,
882                                         input_params_ptrs);
883
884         cookie->output_addr = rte_mempool_virt2iova(cookie) +
885                         offsetof(struct qat_asym_op_cookie,
886                                         output_params_ptrs);
887
888         for (j = 0; j < 8; j++) {
889                 cookie->input_params_ptrs[j] =
890                                 rte_mempool_virt2iova(cookie) +
891                                 offsetof(struct qat_asym_op_cookie,
892                                                 input_array[j]);
893                 cookie->output_params_ptrs[j] =
894                                 rte_mempool_virt2iova(cookie) +
895                                 offsetof(struct qat_asym_op_cookie,
896                                                 output_array[j]);
897         }
898 }
899
900 int
901 qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
902                 struct qat_dev_cmd_param *qat_dev_cmd_param)
903 {
904         struct qat_cryptodev_private *internals;
905         struct rte_cryptodev *cryptodev;
906         struct qat_device_info *qat_dev_instance =
907                 &qat_pci_devs[qat_pci_dev->qat_dev_id];
908         struct rte_cryptodev_pmd_init_params init_params = {
909                 .name = "",
910                 .socket_id = qat_dev_instance->pci_dev->device.numa_node,
911                 .private_data_size = sizeof(struct qat_cryptodev_private)
912         };
913         struct qat_capabilities_info capa_info;
914         const struct rte_cryptodev_capabilities *capabilities;
915         const struct qat_crypto_gen_dev_ops *gen_dev_ops =
916                 &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
917         char name[RTE_CRYPTODEV_NAME_MAX_LEN];
918         char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
919         uint64_t capa_size;
920         int i = 0;
921
922         snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
923                         qat_pci_dev->name, "asym");
924         QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
925
926         if (gen_dev_ops->cryptodev_ops == NULL) {
927                 QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
928                                 name);
929                 return -(EFAULT);
930         }
931
932         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
933                 qat_pci_dev->qat_asym_driver_id =
934                                 qat_asym_driver_id;
935         } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
936                 if (qat_pci_dev->qat_asym_driver_id !=
937                                 qat_asym_driver_id) {
938                         QAT_LOG(ERR,
939                                 "Device %s have different driver id than corresponding device in primary process",
940                                 name);
941                         return -(EFAULT);
942                 }
943         }
944
945         /* Populate subset device to use in cryptodev device creation */
946         qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
947         qat_dev_instance->asym_rte_dev.numa_node =
948                         qat_dev_instance->pci_dev->device.numa_node;
949         qat_dev_instance->asym_rte_dev.devargs = NULL;
950
951         cryptodev = rte_cryptodev_pmd_create(name,
952                         &(qat_dev_instance->asym_rte_dev), &init_params);
953
954         if (cryptodev == NULL)
955                 return -ENODEV;
956
957         qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
958         cryptodev->driver_id = qat_asym_driver_id;
959         cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
960
961         cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
962         cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
963
964         cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
965
966         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
967                 return 0;
968
969         snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
970                         "QAT_ASYM_CAPA_GEN_%d",
971                         qat_pci_dev->qat_dev_gen);
972
973         internals = cryptodev->data->dev_private;
974         internals->qat_dev = qat_pci_dev;
975         internals->dev_id = cryptodev->data->dev_id;
976
977         capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
978         capabilities = capa_info.data;
979         capa_size = capa_info.size;
980
981         internals->capa_mz = rte_memzone_lookup(capa_memz_name);
982         if (internals->capa_mz == NULL) {
983                 internals->capa_mz = rte_memzone_reserve(capa_memz_name,
984                                 capa_size, rte_socket_id(), 0);
985                 if (internals->capa_mz == NULL) {
986                         QAT_LOG(DEBUG,
987                                 "Error allocating memzone for capabilities, "
988                                 "destroying PMD for %s",
989                                 name);
990                         rte_cryptodev_pmd_destroy(cryptodev);
991                         memset(&qat_dev_instance->asym_rte_dev, 0,
992                                 sizeof(qat_dev_instance->asym_rte_dev));
993                         return -EFAULT;
994                 }
995         }
996
997         memcpy(internals->capa_mz->addr, capabilities, capa_size);
998         internals->qat_dev_capabilities = internals->capa_mz->addr;
999
1000         while (1) {
1001                 if (qat_dev_cmd_param[i].name == NULL)
1002                         break;
1003                 if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
1004                         internals->min_enq_burst_threshold =
1005                                         qat_dev_cmd_param[i].val;
1006                 i++;
1007         }
1008
1009         qat_pci_dev->asym_dev = internals;
1010         internals->service_type = QAT_SERVICE_ASYMMETRIC;
1011         QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
1012                         cryptodev->data->name, internals->dev_id);
1013         return 0;
1014 }
1015
1016 int
1017 qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
1018 {
1019         struct rte_cryptodev *cryptodev;
1020
1021         if (qat_pci_dev == NULL)
1022                 return -ENODEV;
1023         if (qat_pci_dev->asym_dev == NULL)
1024                 return 0;
1025         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1026                 rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
1027
1028         /* free crypto device */
1029         cryptodev = rte_cryptodev_pmd_get_dev(
1030                         qat_pci_dev->asym_dev->dev_id);
1031         rte_cryptodev_pmd_destroy(cryptodev);
1032         qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
1033         qat_pci_dev->asym_dev = NULL;
1034
1035         return 0;
1036 }
1037
/* Register this PMD with the cryptodev framework, binding the holder
 * rte_driver declared above to the asym driver id.
 */
static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
		cryptodev_qat_asym_driver,
		qat_asym_driver_id);