crypto/qat: add named elliptic curves
drivers/crypto/qat/qat_asym.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 - 2022 Intel Corporation
 */

#include <stdarg.h>

#include <cryptodev_pmd.h>

#include "qat_device.h"
#include "qat_logs.h"

#include "qat_asym.h"
#include "icp_qat_fw_pke.h"
#include "icp_qat_fw.h"
#include "qat_pke.h"
#include "qat_ec.h"

uint8_t qat_asym_driver_id;

struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];

/* An rte_driver is needed in the registration of both the device and the
 * driver with cryptodev. The actual QAT PCI device's rte_driver can't be used
 * because its name represents the whole PCI device with all services.
 * Think of this as a holder for a name for the crypto part of the PCI device.
 */
static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
static const struct rte_driver cryptodev_qat_asym_driver = {
        .name = qat_asym_drv_name,
        .alias = qat_asym_drv_name
};

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define HEXDUMP(name, where, size) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
                        where, size)
#define HEXDUMP_OFF(name, where, size, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
                        &where[idx * size], size)
#else
#define HEXDUMP(name, where, size)
#define HEXDUMP_OFF(name, where, size, idx)
#endif

#define CHECK_IF_NOT_EMPTY(param, name, pname, status) \
        do { \
                if (param.length == 0) { \
                        QAT_LOG(ERR, \
                                "Invalid " name \
                                " input parameter, zero length " pname \
                        ); \
                        status = -EINVAL; \
                } else if (check_zero(param)) { \
                        QAT_LOG(ERR, \
                                "Invalid " name " input parameter, empty " \
                                pname ", length = %d", \
                                (int)param.length \
                        ); \
                        status = -EINVAL; \
                } \
        } while (0)

#define SET_PKE_LN(where, what, how, idx) \
        rte_memcpy(where[idx] + how - \
                what.length, \
                what.data, \
                what.length)

#define SET_PKE_LN_9A(where, what, how, idx) \
                rte_memcpy(&where[idx * RTE_ALIGN_CEIL(how, 8)] + \
                        RTE_ALIGN_CEIL(how, 8) - \
                        what.length, \
                        what.data, \
                        what.length)

#define SET_PKE_LN_EC(where, what, how, idx) \
                rte_memcpy(&where[idx * RTE_ALIGN_CEIL(how, 8)] + \
                        RTE_ALIGN_CEIL(how, 8) - \
                        how, \
                        what.data, \
                        how)

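/* Zero the PKE request and fill in the common header fields (PKE service
 * type and header flags) shared by every request built in this file.
 */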
static void
request_init(struct icp_qat_fw_pke_request *qat_req)
{
        memset(qat_req, 0, sizeof(*qat_req));
        qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
        qat_req->pke_hdr.hdr_flags =
                        ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
                        (ICP_QAT_FW_COMN_REQ_FLAG_SET);
}

static void
cleanup_arrays(struct qat_asym_op_cookie *cookie,
                int in_count, int out_count, int alg_size)
{
        int i;

        for (i = 0; i < in_count; i++)
                memset(cookie->input_array[i], 0x0, alg_size);
        for (i = 0; i < out_count; i++)
                memset(cookie->output_array[i], 0x0, alg_size);
}

static void
cleanup_crt(struct qat_asym_op_cookie *cookie,
                int alg_size)
{
        int i;

        memset(cookie->input_array[0], 0x0, alg_size);
        for (i = 1; i < QAT_ASYM_RSA_QT_NUM_IN_PARAMS; i++)
                memset(cookie->input_array[i], 0x0, alg_size / 2);
        for (i = 0; i < QAT_ASYM_RSA_NUM_OUT_PARAMS; i++)
                memset(cookie->output_array[i], 0x0, alg_size);
}

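/* Scrub the flat input/output buffers used by the completed operation,
 * dispatching on the transform type; CRT (QT) RSA keys use half-sized
 * private-key lines, hence the dedicated cleanup_crt() helper.
 */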
static void
cleanup(struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform, int alg_size)
{
        if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX)
                cleanup_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
                                QAT_ASYM_MODEXP_NUM_OUT_PARAMS, alg_size);
        else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV)
                cleanup_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
                                QAT_ASYM_MODINV_NUM_OUT_PARAMS, alg_size);
        else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
                if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT)
                        cleanup_crt(cookie, alg_size);
                else {
                        cleanup_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS,
                                QAT_ASYM_RSA_NUM_OUT_PARAMS, alg_size);
                }
        }
}

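/* Return 1 if the big-number parameter consists only of zero bytes
 * (treated as an empty input), 0 otherwise. The last eight bytes are
 * tested as one 64-bit word before the remaining bytes are scanned.
 */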
static int
check_zero(rte_crypto_param n)
{
        int i, len = n.length;

        if (len < 8) {
                for (i = len - 1; i >= 0; i--) {
                        if (n.data[i] != 0x0)
                                return 0;
                }
        } else if (len == 8 && *(uint64_t *)&n.data[len - 8] == 0) {
                return 1;
        } else if (*(uint64_t *)&n.data[len - 8] == 0) {
                for (i = len - 9; i >= 0; i--) {
                        if (n.data[i] != 0x0)
                                return 0;
                }
        } else
                return 0;

        return 1;
}

static struct qat_asym_function
get_asym_function(struct rte_crypto_asym_xform *xform)
{
        struct qat_asym_function qat_function;

        switch (xform->xform_type) {
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                qat_function = get_modexp_function(xform);
                break;
        case RTE_CRYPTO_ASYM_XFORM_MODINV:
                qat_function = get_modinv_function(xform);
                break;
        default:
                qat_function.func_id = 0;
                break;
        }

        return qat_function;
}

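/* Lay out the modular exponentiation inputs: base, exponent and modulus
 * are copied right-aligned into fixed-size PKE input lines whose size and
 * firmware function id come from get_asym_function().
 */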
static int
modexp_set_input(struct rte_crypto_asym_op *asym_op,
                struct icp_qat_fw_pke_request *qat_req,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        struct qat_asym_function qat_function;
        uint32_t alg_bytesize, func_id;
        int status = 0;

        CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod exp",
                        "modulus", status);
        CHECK_IF_NOT_EMPTY(xform->modex.exponent, "mod exp",
                        "exponent", status);
        if (status)
                return status;

        qat_function = get_asym_function(xform);
        func_id = qat_function.func_id;
        if (qat_function.func_id == 0) {
                QAT_LOG(ERR, "Cannot obtain functionality id");
                return -EINVAL;
        }
        alg_bytesize = qat_function.bytesize;

        SET_PKE_LN(cookie->input_array, asym_op->modex.base,
                        alg_bytesize, 0);
        SET_PKE_LN(cookie->input_array, xform->modex.exponent,
                        alg_bytesize, 1);
        SET_PKE_LN(cookie->input_array, xform->modex.modulus,
                        alg_bytesize, 2);

        cookie->alg_bytesize = alg_bytesize;
        qat_req->pke_hdr.cd_pars.func_id = func_id;
        qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
        qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;

        HEXDUMP("ModExp base", cookie->input_array[0], alg_bytesize);
        HEXDUMP("ModExp exponent", cookie->input_array[1], alg_bytesize);
        HEXDUMP("ModExp modulus", cookie->input_array[2], alg_bytesize);

        return status;
}

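/* Copy the mod exp result back to the caller; the device output is
 * left-padded to alg_bytesize, so only the trailing modulus-length
 * bytes are returned.
 */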
static uint8_t
modexp_collect(struct rte_crypto_asym_op *asym_op,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        rte_crypto_param n = xform->modex.modulus;
        uint32_t alg_bytesize = cookie->alg_bytesize;
        uint8_t *modexp_result = asym_op->modex.result.data;

        rte_memcpy(modexp_result,
                cookie->output_array[0] + alg_bytesize
                - n.length, n.length);
        HEXDUMP("ModExp result", cookie->output_array[0],
                        alg_bytesize);
        return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

static int
modinv_set_input(struct rte_crypto_asym_op *asym_op,
                struct icp_qat_fw_pke_request *qat_req,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        struct qat_asym_function qat_function;
        uint32_t alg_bytesize, func_id;
        int status = 0;

        CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod inv",
                        "modulus", status);
        if (status)
                return status;

        qat_function = get_asym_function(xform);
        func_id = qat_function.func_id;
        if (func_id == 0) {
                QAT_LOG(ERR, "Cannot obtain functionality id");
                return -EINVAL;
        }
        alg_bytesize = qat_function.bytesize;

        SET_PKE_LN(cookie->input_array, asym_op->modinv.base,
                        alg_bytesize, 0);
        SET_PKE_LN(cookie->input_array, xform->modinv.modulus,
                        alg_bytesize, 1);

        cookie->alg_bytesize = alg_bytesize;
        qat_req->pke_hdr.cd_pars.func_id = func_id;
        qat_req->input_param_count =
                        QAT_ASYM_MODINV_NUM_IN_PARAMS;
        qat_req->output_param_count =
                        QAT_ASYM_MODINV_NUM_OUT_PARAMS;

        HEXDUMP("ModInv base", cookie->input_array[0], alg_bytesize);
        HEXDUMP("ModInv modulus", cookie->input_array[1], alg_bytesize);

        return 0;
}

static uint8_t
modinv_collect(struct rte_crypto_asym_op *asym_op,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        rte_crypto_param n = xform->modinv.modulus;
        uint8_t *modinv_result = asym_op->modinv.result.data;
        uint32_t alg_bytesize = cookie->alg_bytesize;

        rte_memcpy(modinv_result + (asym_op->modinv.result.length
                - n.length),
                cookie->output_array[0] + alg_bytesize
                - n.length, n.length);
        HEXDUMP("ModInv result", cookie->output_array[0],
                        alg_bytesize);
        return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

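/* Public-key RSA path (encrypt/verify): only RTE_CRYPTO_RSA_PADDING_NONE
 * is accepted, with the message or signature in input line 0 and the
 * public exponent and modulus in lines 1 and 2.
 */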
static int
rsa_set_pub_input(struct rte_crypto_asym_op *asym_op,
                struct icp_qat_fw_pke_request *qat_req,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        struct qat_asym_function qat_function;
        uint32_t alg_bytesize, func_id;
        int status = 0;

        qat_function = get_rsa_enc_function(xform);
        func_id = qat_function.func_id;
        if (func_id == 0) {
                QAT_LOG(ERR, "Cannot obtain functionality id");
                return -EINVAL;
        }
        alg_bytesize = qat_function.bytesize;

        if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
                switch (asym_op->rsa.pad) {
                case RTE_CRYPTO_RSA_PADDING_NONE:
                        SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
                                        alg_bytesize, 0);
                        break;
                default:
                        QAT_LOG(ERR,
                                "Invalid RSA padding (Encryption)");
                        return -EINVAL;
                }
                HEXDUMP("RSA Message", cookie->input_array[0], alg_bytesize);
        } else {
                switch (asym_op->rsa.pad) {
                case RTE_CRYPTO_RSA_PADDING_NONE:
                        SET_PKE_LN(cookie->input_array, asym_op->rsa.sign,
                                        alg_bytesize, 0);
                        break;
                default:
                        QAT_LOG(ERR,
                                "Invalid RSA padding (Verify)");
                        return -EINVAL;
                }
                HEXDUMP("RSA Signature", cookie->input_array[0],
                                alg_bytesize);
        }

        SET_PKE_LN(cookie->input_array, xform->rsa.e,
                        alg_bytesize, 1);
        SET_PKE_LN(cookie->input_array, xform->rsa.n,
                        alg_bytesize, 2);

        cookie->alg_bytesize = alg_bytesize;
        qat_req->pke_hdr.cd_pars.func_id = func_id;

        HEXDUMP("RSA Public Key", cookie->input_array[1], alg_bytesize);
        HEXDUMP("RSA Modulus", cookie->input_array[2], alg_bytesize);

        return status;
}

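/* Private-key RSA path (decrypt/sign): quintuple (QT) keys place p, q,
 * dP, dQ and qInv on half-sized lines 1-5, exponent keys place d and n
 * on lines 1-2; the ciphertext or message goes in line 0 and only
 * unpadded operations are accepted.
 */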
static int
rsa_set_priv_input(struct rte_crypto_asym_op *asym_op,
                struct icp_qat_fw_pke_request *qat_req,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        struct qat_asym_function qat_function;
        uint32_t alg_bytesize, func_id;
        int status = 0;

        if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT) {
                qat_function = get_rsa_crt_function(xform);
                func_id = qat_function.func_id;
                if (func_id == 0) {
                        QAT_LOG(ERR, "Cannot obtain functionality id");
                        return -EINVAL;
                }
                alg_bytesize = qat_function.bytesize;
                qat_req->input_param_count =
                                QAT_ASYM_RSA_QT_NUM_IN_PARAMS;

                SET_PKE_LN(cookie->input_array, xform->rsa.qt.p,
                        (alg_bytesize >> 1), 1);
                SET_PKE_LN(cookie->input_array, xform->rsa.qt.q,
                        (alg_bytesize >> 1), 2);
                SET_PKE_LN(cookie->input_array, xform->rsa.qt.dP,
                        (alg_bytesize >> 1), 3);
                SET_PKE_LN(cookie->input_array, xform->rsa.qt.dQ,
                        (alg_bytesize >> 1), 4);
                SET_PKE_LN(cookie->input_array, xform->rsa.qt.qInv,
                        (alg_bytesize >> 1), 5);

                HEXDUMP("RSA p", cookie->input_array[1],
                                alg_bytesize);
                HEXDUMP("RSA q", cookie->input_array[2],
                                alg_bytesize);
                HEXDUMP("RSA dP", cookie->input_array[3],
                                alg_bytesize);
                HEXDUMP("RSA dQ", cookie->input_array[4],
                                alg_bytesize);
                HEXDUMP("RSA qInv", cookie->input_array[5],
                                alg_bytesize);
        } else if (xform->rsa.key_type ==
                        RTE_RSA_KEY_TYPE_EXP) {
                qat_function = get_rsa_dec_function(xform);
                func_id = qat_function.func_id;
                if (func_id == 0) {
                        QAT_LOG(ERR, "Cannot obtain functionality id");
                        return -EINVAL;
                }
                alg_bytesize = qat_function.bytesize;

                SET_PKE_LN(cookie->input_array, xform->rsa.d,
                        alg_bytesize, 1);
                SET_PKE_LN(cookie->input_array, xform->rsa.n,
                        alg_bytesize, 2);

                HEXDUMP("RSA d", cookie->input_array[1],
                                alg_bytesize);
                HEXDUMP("RSA n", cookie->input_array[2],
                                alg_bytesize);
        } else {
                QAT_LOG(ERR, "Invalid RSA key type");
                return -EINVAL;
        }

        if (asym_op->rsa.op_type ==
                        RTE_CRYPTO_ASYM_OP_DECRYPT) {
                switch (asym_op->rsa.pad) {
                case RTE_CRYPTO_RSA_PADDING_NONE:
                        SET_PKE_LN(cookie->input_array, asym_op->rsa.cipher,
                                alg_bytesize, 0);
                        HEXDUMP("RSA ciphertext", cookie->input_array[0],
                                alg_bytesize);
                        break;
                default:
                        QAT_LOG(ERR,
                                "Invalid padding of RSA (Decrypt)");
                        return -(EINVAL);
                }

        } else if (asym_op->rsa.op_type ==
                        RTE_CRYPTO_ASYM_OP_SIGN) {
                switch (asym_op->rsa.pad) {
                case RTE_CRYPTO_RSA_PADDING_NONE:
                        SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
                                alg_bytesize, 0);
                        HEXDUMP("RSA text to be signed", cookie->input_array[0],
                                alg_bytesize);
                        break;
                default:
                        QAT_LOG(ERR,
                                "Invalid padding of RSA (Signature)");
                        return -(EINVAL);
                }
        }

        cookie->alg_bytesize = alg_bytesize;
        qat_req->pke_hdr.cd_pars.func_id = func_id;
        return status;
}

static int
rsa_set_input(struct rte_crypto_asym_op *asym_op,
                struct icp_qat_fw_pke_request *qat_req,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        qat_req->input_param_count =
                        QAT_ASYM_RSA_NUM_IN_PARAMS;
        qat_req->output_param_count =
                        QAT_ASYM_RSA_NUM_OUT_PARAMS;

        if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
                        asym_op->rsa.op_type ==
                                RTE_CRYPTO_ASYM_OP_VERIFY) {
                return rsa_set_pub_input(asym_op, qat_req, cookie, xform);
        } else {
                return rsa_set_priv_input(asym_op, qat_req, cookie, xform);
        }
}

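/* Copy the RSA output produced by the device into the op field that
 * matches the requested operation (cipher, message or signature buffer).
 */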
static uint8_t
rsa_collect(struct rte_crypto_asym_op *asym_op,
                struct qat_asym_op_cookie *cookie)
{
        uint32_t alg_bytesize = cookie->alg_bytesize;

        if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
                asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {

                if (asym_op->rsa.op_type ==
                                RTE_CRYPTO_ASYM_OP_ENCRYPT) {
                        uint8_t *rsa_result = asym_op->rsa.cipher.data;

                        rte_memcpy(rsa_result,
                                        cookie->output_array[0],
                                        alg_bytesize);
                        HEXDUMP("RSA Encrypted data", cookie->output_array[0],
                                alg_bytesize);
                } else {
                        uint8_t *rsa_result = asym_op->rsa.cipher.data;

                        switch (asym_op->rsa.pad) {
                        case RTE_CRYPTO_RSA_PADDING_NONE:
                                rte_memcpy(rsa_result,
                                                cookie->output_array[0],
                                                alg_bytesize);
                                HEXDUMP("RSA signature",
                                        cookie->output_array[0],
                                        alg_bytesize);
                                break;
                        default:
                                QAT_LOG(ERR, "Padding not supported");
                                return RTE_CRYPTO_OP_STATUS_ERROR;
                        }
                }
        } else {
                if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
                        uint8_t *rsa_result = asym_op->rsa.message.data;

                        switch (asym_op->rsa.pad) {
                        case RTE_CRYPTO_RSA_PADDING_NONE:
                                rte_memcpy(rsa_result,
                                        cookie->output_array[0],
                                        alg_bytesize);
                                HEXDUMP("RSA Decrypted Message",
                                        cookie->output_array[0],
                                        alg_bytesize);
                                break;
                        default:
                                QAT_LOG(ERR, "Padding not supported");
                                return RTE_CRYPTO_OP_STATUS_ERROR;
                        }
                } else {
                        uint8_t *rsa_result = asym_op->rsa.sign.data;

                        rte_memcpy(rsa_result,
                                        cookie->output_array[0],
                                        alg_bytesize);
                        HEXDUMP("RSA Signature", cookie->output_array[0],
                                alg_bytesize);
                }
        }
        return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

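/* Dispatch on the transform type to the helper that fills the PKE
 * request and the cookie input lines.
 */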
static int
asym_set_input(struct rte_crypto_asym_op *asym_op,
                struct icp_qat_fw_pke_request *qat_req,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        switch (xform->xform_type) {
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                return modexp_set_input(asym_op, qat_req,
                                cookie, xform);
        case RTE_CRYPTO_ASYM_XFORM_MODINV:
                return modinv_set_input(asym_op, qat_req,
                                cookie, xform);
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                return rsa_set_input(asym_op, qat_req,
                                cookie, xform);
        default:
                QAT_LOG(ERR, "Invalid/unsupported asymmetric crypto xform");
                return -EINVAL;
        }
        return 1;
}

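/* Build one firmware PKE request from a sessionless asymmetric op. If the
 * input is invalid, a NULL request is still written to the ring slot and
 * the error is latched in the cookie for the response handler.
 */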
static int
qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
                        __rte_unused uint64_t *opaque,
                        __rte_unused enum qat_device_gen qat_dev_gen)
{
        struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
        struct rte_crypto_asym_op *asym_op = op->asym;
        struct icp_qat_fw_pke_request *qat_req =
                        (struct icp_qat_fw_pke_request *)out_msg;
        struct qat_asym_op_cookie *cookie =
                        (struct qat_asym_op_cookie *)op_cookie;
        int err = 0;

        op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
        switch (op->sess_type) {
        case RTE_CRYPTO_OP_WITH_SESSION:
                QAT_LOG(ERR,
                        "QAT asymmetric crypto PMD does not support sessions");
                goto error;
        case RTE_CRYPTO_OP_SESSIONLESS:
                request_init(qat_req);
                err = asym_set_input(asym_op, qat_req, cookie,
                                op->asym->xform);
                if (err) {
                        op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        goto error;
                }
                break;
        default:
                QAT_DP_LOG(ERR, "Invalid session/xform settings");
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
                goto error;
        }

        qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
        qat_req->pke_mid.src_data_addr = cookie->input_addr;
        qat_req->pke_mid.dest_data_addr = cookie->output_addr;

        HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));

        return 0;
error:
        qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
        HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));
        qat_req->output_param_count = 0;
        qat_req->input_param_count = 0;
        qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
        cookie->error |= err;

        return 0;
}

static uint8_t
qat_asym_collect_response(struct rte_crypto_op *rx_op,
                struct qat_asym_op_cookie *cookie,
                struct rte_crypto_asym_xform *xform)
{
        struct rte_crypto_asym_op *asym_op = rx_op->asym;

        switch (xform->xform_type) {
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                return modexp_collect(asym_op, cookie, xform);
        case RTE_CRYPTO_ASYM_XFORM_MODINV:
                return modinv_collect(asym_op, cookie, xform);
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                return rsa_collect(asym_op, cookie);
        default:
                QAT_LOG(ERR, "Unsupported xform type");
                return RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

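/* Convert one firmware PKE response into the op status, collect the
 * output data on success and scrub the cookie buffers. Always returns 1
 * (one descriptor consumed).
 */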
static int
qat_asym_process_response(void **op, uint8_t *resp,
                void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
{
        struct icp_qat_fw_pke_resp *resp_msg =
                        (struct icp_qat_fw_pke_resp *)resp;
        struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
                        (resp_msg->opaque);
        struct qat_asym_op_cookie *cookie = op_cookie;

        if (cookie->error) {
                cookie->error = 0;
                if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
                        rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                QAT_DP_LOG(ERR, "Cookie status returned error");
        } else {
                if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
                        resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
                        if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
                                rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                        QAT_DP_LOG(ERR, "Asymmetric response status"
                                        " returned error");
                }
                if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
                        if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
                                rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                        QAT_DP_LOG(ERR, "Asymmetric common status"
                                        " returned error");
                }
        }
        if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
                rx_op->status = qat_asym_collect_response(rx_op,
                                        cookie, rx_op->asym->xform);
                cleanup(cookie, rx_op->asym->xform,
                                        cookie->alg_bytesize);
        }

        *op = rx_op;
        HEXDUMP("resp_msg:", resp_msg, sizeof(struct icp_qat_fw_pke_resp));

        return 1;
}

int
qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
                struct rte_crypto_asym_xform *xform __rte_unused,
                struct rte_cryptodev_asym_session *sess __rte_unused)
{
        QAT_LOG(ERR, "QAT asymmetric PMD currently does not support sessions");
        return -ENOTSUP;
}

unsigned int
qat_asym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
{
        QAT_LOG(ERR, "QAT asymmetric PMD currently does not support sessions");
        return 0;
}

void
qat_asym_session_clear(struct rte_cryptodev *dev __rte_unused,
                struct rte_cryptodev_asym_session *sess __rte_unused)
{
        QAT_LOG(ERR, "QAT asymmetric PMD currently does not support sessions");
}

static uint16_t
qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
                        nb_ops);
}

static uint16_t
qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
                        nb_ops);
}

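/* Precompute the IOVAs of the input/output pointer tables and of each of
 * the eight flat parameter buffers inside the per-op cookie, so request
 * build only needs to copy parameter data.
 */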
void
qat_asym_init_op_cookie(void *op_cookie)
{
        int j;
        struct qat_asym_op_cookie *cookie = op_cookie;

        cookie->input_addr = rte_mempool_virt2iova(cookie) +
                        offsetof(struct qat_asym_op_cookie,
                                        input_params_ptrs);

        cookie->output_addr = rte_mempool_virt2iova(cookie) +
                        offsetof(struct qat_asym_op_cookie,
                                        output_params_ptrs);

        for (j = 0; j < 8; j++) {
                cookie->input_params_ptrs[j] =
                                rte_mempool_virt2iova(cookie) +
                                offsetof(struct qat_asym_op_cookie,
                                                input_array[j]);
                cookie->output_params_ptrs[j] =
                                rte_mempool_virt2iova(cookie) +
                                offsetof(struct qat_asym_op_cookie,
                                                output_array[j]);
        }
}

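/* Create the asymmetric cryptodev instance for a QAT PCI device: pick the
 * generation-specific ops, copy the capability table into a per-generation
 * memzone and hook up the enqueue/dequeue burst handlers.
 */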
int
qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
                struct qat_dev_cmd_param *qat_dev_cmd_param)
{
        struct qat_cryptodev_private *internals;
        struct rte_cryptodev *cryptodev;
        struct qat_device_info *qat_dev_instance =
                &qat_pci_devs[qat_pci_dev->qat_dev_id];
        struct rte_cryptodev_pmd_init_params init_params = {
                .name = "",
                .socket_id = qat_dev_instance->pci_dev->device.numa_node,
                .private_data_size = sizeof(struct qat_cryptodev_private)
        };
        struct qat_capabilities_info capa_info;
        const struct rte_cryptodev_capabilities *capabilities;
        const struct qat_crypto_gen_dev_ops *gen_dev_ops =
                &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
        char name[RTE_CRYPTODEV_NAME_MAX_LEN];
        char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        uint64_t capa_size;
        int i = 0;

        snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
                        qat_pci_dev->name, "asym");
        QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);

        if (gen_dev_ops->cryptodev_ops == NULL) {
                QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
                                name);
                return -(EFAULT);
        }

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                qat_pci_dev->qat_asym_driver_id =
                                qat_asym_driver_id;
        } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                if (qat_pci_dev->qat_asym_driver_id !=
                                qat_asym_driver_id) {
                        QAT_LOG(ERR,
                                "Device %s has a different driver id than the corresponding device in the primary process",
                                name);
                        return -(EFAULT);
                }
        }

        /* Populate subset device to use in cryptodev device creation */
        qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
        qat_dev_instance->asym_rte_dev.numa_node =
                        qat_dev_instance->pci_dev->device.numa_node;
        qat_dev_instance->asym_rte_dev.devargs = NULL;

        cryptodev = rte_cryptodev_pmd_create(name,
                        &(qat_dev_instance->asym_rte_dev), &init_params);

        if (cryptodev == NULL)
                return -ENODEV;

        qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
        cryptodev->driver_id = qat_asym_driver_id;
        cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;

        cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
        cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;

        cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
                        "QAT_ASYM_CAPA_GEN_%d",
                        qat_pci_dev->qat_dev_gen);

        internals = cryptodev->data->dev_private;
        internals->qat_dev = qat_pci_dev;
        internals->dev_id = cryptodev->data->dev_id;

        capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
        capabilities = capa_info.data;
        capa_size = capa_info.size;

        internals->capa_mz = rte_memzone_lookup(capa_memz_name);
        if (internals->capa_mz == NULL) {
                internals->capa_mz = rte_memzone_reserve(capa_memz_name,
                                capa_size, rte_socket_id(), 0);
                if (internals->capa_mz == NULL) {
                        QAT_LOG(DEBUG,
                                "Error allocating memzone for capabilities, "
                                "destroying PMD for %s",
                                name);
                        rte_cryptodev_pmd_destroy(cryptodev);
                        memset(&qat_dev_instance->asym_rte_dev, 0,
                                sizeof(qat_dev_instance->asym_rte_dev));
                        return -EFAULT;
                }
        }

        memcpy(internals->capa_mz->addr, capabilities, capa_size);
        internals->qat_dev_capabilities = internals->capa_mz->addr;

        while (1) {
                if (qat_dev_cmd_param[i].name == NULL)
                        break;
                if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
                        internals->min_enq_burst_threshold =
                                        qat_dev_cmd_param[i].val;
                i++;
        }

        qat_pci_dev->asym_dev = internals;
        internals->service_type = QAT_SERVICE_ASYMMETRIC;
        QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
                        cryptodev->data->name, internals->dev_id);
        return 0;
}

int
qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
        struct rte_cryptodev *cryptodev;

        if (qat_pci_dev == NULL)
                return -ENODEV;
        if (qat_pci_dev->asym_dev == NULL)
                return 0;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);

        /* free crypto device */
        cryptodev = rte_cryptodev_pmd_get_dev(
                        qat_pci_dev->asym_dev->dev_id);
        rte_cryptodev_pmd_destroy(cryptodev);
        qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
        qat_pci_dev->asym_dev = NULL;

        return 0;
}

static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
                cryptodev_qat_asym_driver,
                qat_asym_driver_id);