crypto/qat: refactor asymmetric crypto functions
drivers/crypto/qat/qat_asym.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 - 2022 Intel Corporation
 */

#include <stdarg.h>

#include <cryptodev_pmd.h>

#include "qat_device.h"
#include "qat_logs.h"

#include "qat_asym.h"
#include "icp_qat_fw_pke.h"
#include "icp_qat_fw.h"
#include "qat_pke.h"

uint8_t qat_asym_driver_id;

struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];

/* An rte_driver is needed to register both the device and the driver
 * with cryptodev.
 * The QAT PCI device's own rte_driver cannot be used, as its name represents
 * the whole PCI device with all of its services. Think of this as a holder
 * for a name for the crypto part of the PCI device.
 */
static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
static const struct rte_driver cryptodev_qat_asym_driver = {
	.name = qat_asym_drv_name,
	.alias = qat_asym_drv_name
};

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define HEXDUMP(name, where, size) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			where, size)
#define HEXDUMP_OFF(name, where, size, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			&where[idx * size], size)
#else
#define HEXDUMP(name, where, size)
#define HEXDUMP_OFF(name, where, size, idx)
#endif

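/* Log an error and set 'status' to -EINVAL if the parameter has zero length
 * or contains only zero bytes.
 */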
#define CHECK_IF_NOT_EMPTY(param, name, pname, status) \
	do { \
		if (param.length == 0) { \
			QAT_LOG(ERR, \
				"Invalid " name \
				" input parameter, zero length " pname \
			); \
			status = -EINVAL; \
		} else if (check_zero(param)) { \
			QAT_LOG(ERR, \
				"Invalid " name " input parameter, empty " \
				pname ", length = %d", \
				(int)param.length \
			); \
			status = -EINVAL; \
		} \
	} while (0)

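/* Copy a big-endian parameter into a PKE input buffer, right-aligned to the
 * 'how'-byte size expected by the firmware.
 */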
#define SET_PKE_LN(where, what, how, idx) \
	rte_memcpy(where[idx] + how - \
		what.length, \
		what.data, \
		what.length)

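/* Zero the PKE request and fill in the common header fields. */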
static void
request_init(struct icp_qat_fw_pke_request *qat_req)
{
	memset(qat_req, 0, sizeof(*qat_req));
	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	qat_req->pke_hdr.hdr_flags =
			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
}

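/* Clear the cookie input/output buffers of a completed operation so no
 * stale operand data is carried over into the next request.
 */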
static void
cleanup_arrays(struct qat_asym_op_cookie *cookie,
		int in_count, int out_count, int alg_size)
{
	int i;

	for (i = 0; i < in_count; i++)
		memset(cookie->input_array[i], 0x0, alg_size);
	for (i = 0; i < out_count; i++)
		memset(cookie->output_array[i], 0x0, alg_size);
}

static void
cleanup_crt(struct qat_asym_op_cookie *cookie,
		int alg_size)
{
	int i;

	memset(cookie->input_array[0], 0x0, alg_size);
	for (i = 1; i < QAT_ASYM_RSA_QT_NUM_IN_PARAMS; i++)
		memset(cookie->input_array[i], 0x0, alg_size / 2);
	for (i = 0; i < QAT_ASYM_RSA_NUM_OUT_PARAMS; i++)
		memset(cookie->output_array[i], 0x0, alg_size);
}

static void
cleanup(struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform, int alg_size)
{
	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX)
		cleanup_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, alg_size);
	else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV)
		cleanup_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
				QAT_ASYM_MODINV_NUM_OUT_PARAMS, alg_size);
	else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
		if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT)
			cleanup_crt(cookie, alg_size);
		else {
			cleanup_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS,
				QAT_ASYM_RSA_NUM_OUT_PARAMS, alg_size);
		}
	}
}

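/* Return 1 if the parameter consists only of zero bytes, 0 otherwise. */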
static int
check_zero(rte_crypto_param n)
{
	int i, len = n.length;

	if (len < 8) {
		for (i = len - 1; i >= 0; i--) {
			if (n.data[i] != 0x0)
				return 0;
		}
	} else if (len == 8 && *(uint64_t *)&n.data[len - 8] == 0) {
		return 1;
	} else if (*(uint64_t *)&n.data[len - 8] == 0) {
		for (i = len - 9; i >= 0; i--) {
			if (n.data[i] != 0x0)
				return 0;
		}
	} else
		return 0;

	return 1;
}

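/* Map a modexp/modinv xform onto the matching QAT PKE function id and size. */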
static struct qat_asym_function
get_asym_function(struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;

	switch (xform->xform_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		qat_function = get_modexp_function(xform);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODINV:
		qat_function = get_modinv_function(xform);
		break;
	default:
		qat_function.func_id = 0;
		break;
	}

	return qat_function;
}

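/* Fill the cookie input buffers (base, exponent, modulus) and the request
 * parameters for a modular exponentiation operation.
 */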
static int
modexp_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod exp",
			"modulus", status);
	CHECK_IF_NOT_EMPTY(xform->modex.exponent, "mod exp",
				"exponent", status);
	if (status)
		return status;

	qat_function = get_asym_function(xform);
	func_id = qat_function.func_id;
	if (func_id == 0) {
		QAT_LOG(ERR, "Cannot obtain functionality id");
		return -EINVAL;
	}
	alg_bytesize = qat_function.bytesize;

	SET_PKE_LN(cookie->input_array, asym_op->modex.base,
			alg_bytesize, 0);
	SET_PKE_LN(cookie->input_array, xform->modex.exponent,
			alg_bytesize, 1);
	SET_PKE_LN(cookie->input_array, xform->modex.modulus,
			alg_bytesize, 2);

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;
	qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
	qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;

	HEXDUMP("ModExp base", cookie->input_array[0], alg_bytesize);
	HEXDUMP("ModExp exponent", cookie->input_array[1], alg_bytesize);
	HEXDUMP("ModExp modulus", cookie->input_array[2], alg_bytesize);

	return status;
}

static uint8_t
modexp_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	rte_crypto_param n = xform->modex.modulus;
	uint32_t alg_bytesize = cookie->alg_bytesize;
	uint8_t *modexp_result = asym_op->modex.result.data;

	rte_memcpy(modexp_result,
		cookie->output_array[0] + alg_bytesize
		- n.length, n.length);
	HEXDUMP("ModExp result", cookie->output_array[0],
			alg_bytesize);
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

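/* Fill the cookie input buffers (base, modulus) and the request parameters
 * for a modular multiplicative inverse operation.
 */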
static int
modinv_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	CHECK_IF_NOT_EMPTY(xform->modinv.modulus, "mod inv",
			"modulus", status);
	if (status)
		return status;

	qat_function = get_asym_function(xform);
	func_id = qat_function.func_id;
	if (func_id == 0) {
		QAT_LOG(ERR, "Cannot obtain functionality id");
		return -EINVAL;
	}
	alg_bytesize = qat_function.bytesize;

	SET_PKE_LN(cookie->input_array, asym_op->modinv.base,
			alg_bytesize, 0);
	SET_PKE_LN(cookie->input_array, xform->modinv.modulus,
			alg_bytesize, 1);

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;
	qat_req->input_param_count =
			QAT_ASYM_MODINV_NUM_IN_PARAMS;
	qat_req->output_param_count =
			QAT_ASYM_MODINV_NUM_OUT_PARAMS;

	HEXDUMP("ModInv base", cookie->input_array[0], alg_bytesize);
	HEXDUMP("ModInv modulus", cookie->input_array[1], alg_bytesize);

	return 0;
}

static uint8_t
modinv_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	rte_crypto_param n = xform->modinv.modulus;
	uint8_t *modinv_result = asym_op->modinv.result.data;
	uint32_t alg_bytesize = cookie->alg_bytesize;

	rte_memcpy(modinv_result + (asym_op->modinv.result.length
		- n.length),
		cookie->output_array[0] + alg_bytesize
		- n.length, n.length);
	HEXDUMP("ModInv result", cookie->output_array[0],
			alg_bytesize);
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

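/* Set up an RSA public-key operation (encrypt or verify): message or
 * signature, public exponent and modulus.
 */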
static int
rsa_set_pub_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	qat_function = get_rsa_enc_function(xform);
	func_id = qat_function.func_id;
	if (func_id == 0) {
		QAT_LOG(ERR, "Cannot obtain functionality id");
		return -EINVAL;
	}
	alg_bytesize = qat_function.bytesize;

	if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
					alg_bytesize, 0);
			break;
		default:
			QAT_LOG(ERR, "Invalid RSA padding (Encryption)");
			return -EINVAL;
		}
		HEXDUMP("RSA Message", cookie->input_array[0], alg_bytesize);
	} else {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.sign,
					alg_bytesize, 0);
			break;
		default:
			QAT_LOG(ERR, "Invalid RSA padding (Verify)");
			return -EINVAL;
		}
		HEXDUMP("RSA Signature", cookie->input_array[0],
				alg_bytesize);
	}

	SET_PKE_LN(cookie->input_array, xform->rsa.e,
			alg_bytesize, 1);
	SET_PKE_LN(cookie->input_array, xform->rsa.n,
			alg_bytesize, 2);

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;

	HEXDUMP("RSA Public Key", cookie->input_array[1], alg_bytesize);
	HEXDUMP("RSA Modulus", cookie->input_array[2], alg_bytesize);

	return status;
}

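/* Set up an RSA private-key operation (decrypt or sign), using either the
 * CRT (quintuple) or the exponent form of the private key.
 */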
static int
rsa_set_priv_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT) {
		qat_function = get_rsa_crt_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;
		qat_req->input_param_count =
				QAT_ASYM_RSA_QT_NUM_IN_PARAMS;

		SET_PKE_LN(cookie->input_array, xform->rsa.qt.p,
			(alg_bytesize >> 1), 1);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.q,
			(alg_bytesize >> 1), 2);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.dP,
			(alg_bytesize >> 1), 3);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.dQ,
			(alg_bytesize >> 1), 4);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.qInv,
			(alg_bytesize >> 1), 5);

		HEXDUMP("RSA p", cookie->input_array[1],
				alg_bytesize);
		HEXDUMP("RSA q", cookie->input_array[2],
				alg_bytesize);
		HEXDUMP("RSA dP", cookie->input_array[3],
				alg_bytesize);
		HEXDUMP("RSA dQ", cookie->input_array[4],
				alg_bytesize);
		HEXDUMP("RSA qInv", cookie->input_array[5],
				alg_bytesize);
	} else if (xform->rsa.key_type ==
			RTE_RSA_KEY_TYPE_EXP) {
		qat_function = get_rsa_dec_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;

		SET_PKE_LN(cookie->input_array, xform->rsa.d,
			alg_bytesize, 1);
		SET_PKE_LN(cookie->input_array, xform->rsa.n,
			alg_bytesize, 2);

		HEXDUMP("RSA d", cookie->input_array[1],
				alg_bytesize);
		HEXDUMP("RSA n", cookie->input_array[2],
				alg_bytesize);
	} else {
		QAT_LOG(ERR, "Invalid RSA key type");
		return -EINVAL;
	}

	if (asym_op->rsa.op_type ==
			RTE_CRYPTO_ASYM_OP_DECRYPT) {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.cipher,
				alg_bytesize, 0);
			HEXDUMP("RSA ciphertext", cookie->input_array[0],
				alg_bytesize);
			break;
		default:
			QAT_LOG(ERR, "Invalid RSA padding (Decrypt)");
			return -EINVAL;
		}

	} else if (asym_op->rsa.op_type ==
			RTE_CRYPTO_ASYM_OP_SIGN) {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
				alg_bytesize, 0);
			HEXDUMP("RSA text to be signed", cookie->input_array[0],
				alg_bytesize);
			break;
		default:
			QAT_LOG(ERR, "Invalid RSA padding (Signature)");
			return -EINVAL;
		}
	}

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;
	return status;
}

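/* Dispatch an RSA operation to the public- or private-key input setup. */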
static int
rsa_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	qat_req->input_param_count =
			QAT_ASYM_RSA_NUM_IN_PARAMS;
	qat_req->output_param_count =
			QAT_ASYM_RSA_NUM_OUT_PARAMS;

	if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
			asym_op->rsa.op_type ==
				RTE_CRYPTO_ASYM_OP_VERIFY) {
		return rsa_set_pub_input(asym_op, qat_req, cookie, xform);
	} else {
		return rsa_set_priv_input(asym_op, qat_req, cookie, xform);
	}
}

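/* Copy the RSA result from the cookie output buffer into the crypto op. */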
static uint8_t
rsa_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie)
{
	uint32_t alg_bytesize = cookie->alg_bytesize;

	if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
		asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {

		if (asym_op->rsa.op_type ==
				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
			uint8_t *rsa_result = asym_op->rsa.cipher.data;

			rte_memcpy(rsa_result,
					cookie->output_array[0],
					alg_bytesize);
			HEXDUMP("RSA Encrypted data", cookie->output_array[0],
				alg_bytesize);
		} else {
			uint8_t *rsa_result = asym_op->rsa.cipher.data;

			switch (asym_op->rsa.pad) {
			case RTE_CRYPTO_RSA_PADDING_NONE:
				rte_memcpy(rsa_result,
						cookie->output_array[0],
						alg_bytesize);
				HEXDUMP("RSA signature",
					cookie->output_array[0],
					alg_bytesize);
				break;
			default:
				QAT_LOG(ERR, "Padding not supported");
				return RTE_CRYPTO_OP_STATUS_ERROR;
			}
		}
	} else {
		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
			uint8_t *rsa_result = asym_op->rsa.message.data;

			switch (asym_op->rsa.pad) {
			case RTE_CRYPTO_RSA_PADDING_NONE:
				rte_memcpy(rsa_result,
					cookie->output_array[0],
					alg_bytesize);
				HEXDUMP("RSA Decrypted Message",
					cookie->output_array[0],
					alg_bytesize);
				break;
			default:
				QAT_LOG(ERR, "Padding not supported");
				return RTE_CRYPTO_OP_STATUS_ERROR;
			}
		} else {
			uint8_t *rsa_result = asym_op->rsa.sign.data;

			rte_memcpy(rsa_result,
					cookie->output_array[0],
					alg_bytesize);
			HEXDUMP("RSA Signature", cookie->output_array[0],
				alg_bytesize);
		}
	}
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

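/* Fill the request and cookie input buffers according to the xform type. */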
static int
asym_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	switch (xform->xform_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		return modexp_set_input(asym_op, qat_req,
				cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_MODINV:
		return modinv_set_input(asym_op, qat_req,
				cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		return rsa_set_input(asym_op, qat_req,
				cookie, xform);
	default:
		QAT_LOG(ERR, "Invalid/unsupported asymmetric crypto xform");
		return -EINVAL;
	}
}

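/* Build a PKE firmware request for one sessionless asymmetric operation.
 * On input error the request is turned into a NULL request and the error is
 * stored in the cookie, to be reported when the response is dequeued.
 */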
static int
qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
			__rte_unused uint64_t *opaque,
			__rte_unused enum qat_device_gen qat_dev_gen)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct icp_qat_fw_pke_request *qat_req =
			(struct icp_qat_fw_pke_request *)out_msg;
	struct qat_asym_op_cookie *cookie =
			(struct qat_asym_op_cookie *)op_cookie;
	int err = 0;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		QAT_LOG(ERR,
			"QAT asymmetric crypto PMD does not support sessions");
		goto error;
	case RTE_CRYPTO_OP_SESSIONLESS:
		request_init(qat_req);
		err = asym_set_input(asym_op, qat_req, cookie,
				op->asym->xform);
		if (err) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			goto error;
		}
		break;
	default:
		QAT_DP_LOG(ERR, "Invalid session/xform settings");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		goto error;
	}

	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	qat_req->pke_mid.src_data_addr = cookie->input_addr;
	qat_req->pke_mid.dest_data_addr = cookie->output_addr;

	HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));

	return 0;
error:
	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));
	qat_req->output_param_count = 0;
	qat_req->input_param_count = 0;
	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
	cookie->error |= err;

	return 0;
}

static uint8_t
qat_asym_collect_response(struct rte_crypto_op *rx_op,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct rte_crypto_asym_op *asym_op = rx_op->asym;

	switch (xform->xform_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		return modexp_collect(asym_op, cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_MODINV:
		return modinv_collect(asym_op, cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		return rsa_collect(asym_op, cookie);
	default:
		QAT_LOG(ERR, "Unsupported xform type");
		return RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

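/* Process one PKE response: check cookie and firmware status, collect the
 * result into the crypto op and clear the cookie buffers.
 */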
static int
qat_asym_process_response(void **op, uint8_t *resp,
		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_pke_resp *resp_msg =
			(struct icp_qat_fw_pke_resp *)resp;
	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque);
	struct qat_asym_op_cookie *cookie = op_cookie;

	if (cookie->error) {
		cookie->error = 0;
		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		QAT_DP_LOG(ERR, "Cookie status returned error");
	} else {
		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric response status"
					" returned error");
		}
		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric common status"
					" returned error");
		}
	}
	if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
		rx_op->status = qat_asym_collect_response(rx_op,
					cookie, rx_op->asym->xform);
		cleanup(cookie, rx_op->asym->xform,
					cookie->alg_bytesize);
	}

	*op = rx_op;
	HEXDUMP("resp_msg:", resp_msg, sizeof(struct icp_qat_fw_pke_resp));

	return 1;
}

int
qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
		struct rte_crypto_asym_xform *xform __rte_unused,
		struct rte_cryptodev_asym_session *sess __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support sessions");
	return -ENOTSUP;
}

unsigned int
qat_asym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support sessions");
	return 0;
}

void
qat_asym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_asym_session *sess __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support sessions");
}

static uint16_t
qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
			nb_ops);
}

static uint16_t
qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
				nb_ops);
}

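/* Precompute the IOVA of the input/output pointer tables and of each flat
 * parameter buffer inside the op cookie.
 */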
void
qat_asym_init_op_cookie(void *op_cookie)
{
	int j;
	struct qat_asym_op_cookie *cookie = op_cookie;

	cookie->input_addr = rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_asym_op_cookie,
					input_params_ptrs);

	cookie->output_addr = rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_asym_op_cookie,
					output_params_ptrs);

	for (j = 0; j < 8; j++) {
		cookie->input_params_ptrs[j] =
				rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_asym_op_cookie,
						input_array[j]);
		cookie->output_params_ptrs[j] =
				rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_asym_op_cookie,
						output_array[j]);
	}
}

int
qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
		struct qat_dev_cmd_param *qat_dev_cmd_param)
{
	struct qat_cryptodev_private *internals;
	struct rte_cryptodev *cryptodev;
	struct qat_device_info *qat_dev_instance =
		&qat_pci_devs[qat_pci_dev->qat_dev_id];
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
		.private_data_size = sizeof(struct qat_cryptodev_private)
	};
	struct qat_capabilities_info capa_info;
	const struct rte_cryptodev_capabilities *capabilities;
	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint64_t capa_size;
	int i = 0;

	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "asym");
	QAT_LOG(DEBUG, "Creating QAT ASYM device %s", name);

	if (gen_dev_ops->cryptodev_ops == NULL) {
		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
				name);
		return -EFAULT;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		qat_pci_dev->qat_asym_driver_id =
				qat_asym_driver_id;
	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (qat_pci_dev->qat_asym_driver_id !=
				qat_asym_driver_id) {
			QAT_LOG(ERR,
				"Device %s has a different driver id than the corresponding device in the primary process",
				name);
			return -EFAULT;
		}
	}

	/* Populate subset device to use in cryptodev device creation */
	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
	qat_dev_instance->asym_rte_dev.numa_node =
			qat_dev_instance->pci_dev->device.numa_node;
	qat_dev_instance->asym_rte_dev.devargs = NULL;

	cryptodev = rte_cryptodev_pmd_create(name,
			&(qat_dev_instance->asym_rte_dev), &init_params);

	if (cryptodev == NULL)
		return -ENODEV;

	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
	cryptodev->driver_id = qat_asym_driver_id;
	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;

	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;

	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"QAT_ASYM_CAPA_GEN_%d",
			qat_pci_dev->qat_dev_gen);

	internals = cryptodev->data->dev_private;
	internals->qat_dev = qat_pci_dev;
	internals->dev_id = cryptodev->data->dev_id;

	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
	capabilities = capa_info.data;
	capa_size = capa_info.size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				capa_size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities, "
				"destroying PMD for %s",
				name);
			rte_cryptodev_pmd_destroy(cryptodev);
			memset(&qat_dev_instance->asym_rte_dev, 0,
				sizeof(qat_dev_instance->asym_rte_dev));
			return -EFAULT;
		}
	}

	memcpy(internals->capa_mz->addr, capabilities, capa_size);
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	while (1) {
		if (qat_dev_cmd_param[i].name == NULL)
			break;
		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
			internals->min_enq_burst_threshold =
					qat_dev_cmd_param[i].val;
		i++;
	}

	qat_pci_dev->asym_dev = internals;
	internals->service_type = QAT_SERVICE_ASYMMETRIC;
	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
			cryptodev->data->name, internals->dev_id);
	return 0;
}

int
qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct rte_cryptodev *cryptodev;

	if (qat_pci_dev == NULL)
		return -ENODEV;
	if (qat_pci_dev->asym_dev == NULL)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);

	/* free crypto device */
	cryptodev = rte_cryptodev_pmd_get_dev(
			qat_pci_dev->asym_dev->dev_id);
	rte_cryptodev_pmd_destroy(cryptodev);
	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
	qat_pci_dev->asym_dev = NULL;

	return 0;
}

static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
		cryptodev_qat_asym_driver,
		qat_asym_driver_id);