crypto/qat: add ECPM algorithm
[dpdk.git] / drivers / crypto / qat / qat_asym.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 - 2022 Intel Corporation
3  */
4
5 #include <stdarg.h>
6
7 #include <cryptodev_pmd.h>
8
9 #include "qat_device.h"
10 #include "qat_logs.h"
11
12 #include "qat_asym.h"
13 #include "icp_qat_fw_pke.h"
14 #include "icp_qat_fw.h"
15 #include "qat_pke.h"
16 #include "qat_ec.h"
17
/* Driver id assigned by the cryptodev framework when this PMD registers. */
uint8_t qat_asym_driver_id;

/* Per-QAT-generation dev-ops table; entries are populated outside this file. */
struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];

/* An rte_driver is needed in the registration of both the device and the driver
 * with cryptodev.
 * The actual qat pci's rte_driver can't be used as its name represents
 * the whole pci device with all services. Think of this as a holder for a name
 * for the crypto part of the pci device.
 */
static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
static const struct rte_driver cryptodev_qat_asym_driver = {
	.name = qat_asym_drv_name,
	.alias = qat_asym_drv_name
};
33
/*
 * Macros with suffix _F are used with some of predefined identifiers:
 * - cookie->input_buffer
 * - qat_alg_bytesize
 */
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
/* Dump 'size' bytes at 'where' under the label 'name' (debug builds only). */
#define HEXDUMP(name, where, size) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			where, size)
/* Dump the idx-th 'size'-byte element of the flat buffer 'where'. */
#define HEXDUMP_OFF(name, where, size, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			&where[idx * size], size)

/* As HEXDUMP_OFF, hard-wired to cookie->input_buffer / qat_alg_bytesize. */
#define HEXDUMP_OFF_F(name, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			&cookie->input_buffer[idx * qat_alg_bytesize], \
			qat_alg_bytesize)
#else
/* Data-path debug logging disabled: the dump macros compile to nothing. */
#define HEXDUMP(name, where, size)
#define HEXDUMP_OFF(name, where, size, idx)
#define HEXDUMP_OFF_F(name, idx)
#endif
53
/*
 * Validate an rte_crypto_param: reject a zero-length buffer and a buffer
 * holding only zero bytes (via check_zero()). On failure, log an error and
 * set 'status' to -EINVAL; 'name' and 'pname' are string literals used only
 * in the log message. 'status' is left untouched on success.
 */
#define CHECK_IF_NOT_EMPTY(param, name, pname, status) \
	do { \
		if (param.length == 0) {	\
			QAT_LOG(ERR,			\
				"Invalid " name \
				" input parameter, zero length " pname	\
			);	\
			status = -EINVAL;	\
		} else if (check_zero(param)) { \
			QAT_LOG(ERR,	\
				"Invalid " name " input parameter, empty " \
				pname ", length = %d", \
				(int)param.length \
			); \
			status = -EINVAL;	\
		} \
	} while (0)
71
/* Copy 'what' right-aligned (leading bytes left as-is) into the idx-th
 * line of the pointer array 'where'; 'how' is the line size in bytes.
 */
#define SET_PKE_LN(where, what, how, idx) \
	rte_memcpy(where[idx] + how - \
		what.length, \
		what.data, \
		what.length)

/* As SET_PKE_LN, but 'where' is one flat buffer whose per-line stride is
 * 'how' rounded up to a multiple of 8 bytes.
 */
#define SET_PKE_LN_9A(where, what, how, idx) \
		rte_memcpy(&where[idx * RTE_ALIGN_CEIL(how, 8)] + \
			RTE_ALIGN_CEIL(how, 8) - \
			what.length, \
			what.data, \
			what.length)

/* Copy exactly 'how' bytes of 'what' right-aligned within an 8-byte-rounded
 * line of the pointer array 'where' (used for fixed-size curve constants).
 */
#define SET_PKE_LN_EC(where, what, how, idx) \
		rte_memcpy(where[idx] + \
			RTE_ALIGN_CEIL(how, 8) - \
			how, \
			what.data, \
			how)

/* SET_PKE_LN_9A fixed to cookie->input_buffer / qat_alg_bytesize. */
#define SET_PKE_LN_9A_F(what, idx) \
		rte_memcpy(&cookie->input_buffer[idx * qat_alg_bytesize] + \
			qat_alg_bytesize - what.length, \
			what.data, what.length)

/* SET_PKE_LN_EC fixed to cookie->input_buffer. */
#define SET_PKE_LN_EC_F(what, how, idx) \
		rte_memcpy(&cookie->input_buffer[idx * \
			RTE_ALIGN_CEIL(how, 8)] + \
			RTE_ALIGN_CEIL(how, 8) - how, \
			what.data, how)
102
103 static void
104 request_init(struct icp_qat_fw_pke_request *qat_req)
105 {
106         memset(qat_req, 0, sizeof(*qat_req));
107         qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
108         qat_req->pke_hdr.hdr_flags =
109                         ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
110                         (ICP_QAT_FW_COMN_REQ_FLAG_SET);
111 }
112
113 static void
114 cleanup_arrays(struct qat_asym_op_cookie *cookie,
115                 int in_count, int out_count, int alg_size)
116 {
117         int i;
118
119         for (i = 0; i < in_count; i++)
120                 memset(cookie->input_array[i], 0x0, alg_size);
121         for (i = 0; i < out_count; i++)
122                 memset(cookie->output_array[i], 0x0, alg_size);
123 }
124
125 static void
126 cleanup_crt(struct qat_asym_op_cookie *cookie,
127                 int alg_size)
128 {
129         int i;
130
131         memset(cookie->input_array[0], 0x0, alg_size);
132         for (i = 1; i < QAT_ASYM_RSA_QT_NUM_IN_PARAMS; i++)
133                 memset(cookie->input_array[i], 0x0, alg_size / 2);
134         for (i = 0; i < QAT_ASYM_RSA_NUM_OUT_PARAMS; i++)
135                 memset(cookie->output_array[i], 0x0, alg_size);
136 }
137
138 static void
139 cleanup(struct qat_asym_op_cookie *cookie,
140                 struct rte_crypto_asym_xform *xform, int alg_size)
141 {
142         if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX)
143                 cleanup_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
144                                 QAT_ASYM_MODEXP_NUM_OUT_PARAMS, alg_size);
145         else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV)
146                 cleanup_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
147                                 QAT_ASYM_MODINV_NUM_OUT_PARAMS, alg_size);
148         else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
149                 if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT)
150                         cleanup_crt(cookie, alg_size);
151                 else {
152                         cleanup_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS,
153                                 QAT_ASYM_RSA_NUM_OUT_PARAMS, alg_size);
154                 }
155         }
156 }
157
158 static int
159 check_zero(rte_crypto_param n)
160 {
161         int i, len = n.length;
162
163         if (len < 8) {
164                 for (i = len - 1; i >= 0; i--) {
165                         if (n.data[i] != 0x0)
166                                 return 0;
167                 }
168         } else if (len == 8 && *(uint64_t *)&n.data[len - 8] == 0) {
169                 return 1;
170         } else if (*(uint64_t *)&n.data[len - 8] == 0) {
171                 for (i = len - 9; i >= 0; i--) {
172                         if (n.data[i] != 0x0)
173                                 return 0;
174                 }
175         } else
176                 return 0;
177
178         return 1;
179 }
180
181 static struct qat_asym_function
182 get_asym_function(struct rte_crypto_asym_xform *xform)
183 {
184         struct qat_asym_function qat_function;
185
186         switch (xform->xform_type) {
187         case RTE_CRYPTO_ASYM_XFORM_MODEX:
188                 qat_function = get_modexp_function(xform);
189                 break;
190         case RTE_CRYPTO_ASYM_XFORM_MODINV:
191                 qat_function = get_modinv_function(xform);
192                 break;
193         default:
194                 qat_function.func_id = 0;
195                 break;
196         }
197
198         return qat_function;
199 }
200
201 static int
202 modexp_set_input(struct rte_crypto_asym_op *asym_op,
203                 struct icp_qat_fw_pke_request *qat_req,
204                 struct qat_asym_op_cookie *cookie,
205                 struct rte_crypto_asym_xform *xform)
206 {
207         struct qat_asym_function qat_function;
208         uint32_t alg_bytesize, func_id;
209         int status = 0;
210
211         CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod exp",
212                         "modulus", status);
213         CHECK_IF_NOT_EMPTY(xform->modex.exponent, "mod exp",
214                                 "exponent", status);
215         if (status)
216                 return status;
217
218         qat_function = get_asym_function(xform);
219         func_id = qat_function.func_id;
220         if (qat_function.func_id == 0) {
221                 QAT_LOG(ERR, "Cannot obtain functionality id");
222                 return -EINVAL;
223         }
224         alg_bytesize = qat_function.bytesize;
225
226         SET_PKE_LN(cookie->input_array, asym_op->modex.base,
227                         alg_bytesize, 0);
228         SET_PKE_LN(cookie->input_array, xform->modex.exponent,
229                         alg_bytesize, 1);
230         SET_PKE_LN(cookie->input_array, xform->modex.modulus,
231                         alg_bytesize, 2);
232
233         cookie->alg_bytesize = alg_bytesize;
234         qat_req->pke_hdr.cd_pars.func_id = func_id;
235         qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
236         qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
237
238         HEXDUMP("ModExp base", cookie->input_array[0], alg_bytesize);
239         HEXDUMP("ModExp exponent", cookie->input_array[1], alg_bytesize);
240         HEXDUMP("ModExp modulus", cookie->input_array[2], alg_bytesize);
241
242         return status;
243 }
244
245 static uint8_t
246 modexp_collect(struct rte_crypto_asym_op *asym_op,
247                 struct qat_asym_op_cookie *cookie,
248                 struct rte_crypto_asym_xform *xform)
249 {
250         rte_crypto_param n = xform->modex.modulus;
251         uint32_t alg_bytesize = cookie->alg_bytesize;
252         uint8_t *modexp_result = asym_op->modex.result.data;
253
254         rte_memcpy(modexp_result,
255                 cookie->output_array[0] + alg_bytesize
256                 - n.length, n.length);
257         HEXDUMP("ModExp result", cookie->output_array[0],
258                         alg_bytesize);
259         return RTE_CRYPTO_OP_STATUS_SUCCESS;
260 }
261
262 static int
263 modinv_set_input(struct rte_crypto_asym_op *asym_op,
264                 struct icp_qat_fw_pke_request *qat_req,
265                 struct qat_asym_op_cookie *cookie,
266                 struct rte_crypto_asym_xform *xform)
267 {
268         struct qat_asym_function qat_function;
269         uint32_t alg_bytesize, func_id;
270         int status = 0;
271
272         CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod inv",
273                         "modulus", status);
274         if (status)
275                 return status;
276
277         qat_function = get_asym_function(xform);
278         func_id = qat_function.func_id;
279         if (func_id == 0) {
280                 QAT_LOG(ERR, "Cannot obtain functionality id");
281                 return -EINVAL;
282         }
283         alg_bytesize = qat_function.bytesize;
284
285         SET_PKE_LN(cookie->input_array, asym_op->modinv.base,
286                         alg_bytesize, 0);
287         SET_PKE_LN(cookie->input_array, xform->modinv.modulus,
288                         alg_bytesize, 1);
289
290         cookie->alg_bytesize = alg_bytesize;
291         qat_req->pke_hdr.cd_pars.func_id = func_id;
292         qat_req->input_param_count =
293                         QAT_ASYM_MODINV_NUM_IN_PARAMS;
294         qat_req->output_param_count =
295                         QAT_ASYM_MODINV_NUM_OUT_PARAMS;
296
297         HEXDUMP("ModInv base", cookie->input_array[0], alg_bytesize);
298         HEXDUMP("ModInv modulus", cookie->input_array[1], alg_bytesize);
299
300         return 0;
301 }
302
303 static uint8_t
304 modinv_collect(struct rte_crypto_asym_op *asym_op,
305                 struct qat_asym_op_cookie *cookie,
306                 struct rte_crypto_asym_xform *xform)
307 {
308         rte_crypto_param n = xform->modinv.modulus;
309         uint8_t *modinv_result = asym_op->modinv.result.data;
310         uint32_t alg_bytesize = cookie->alg_bytesize;
311
312         rte_memcpy(modinv_result + (asym_op->modinv.result.length
313                 - n.length),
314                 cookie->output_array[0] + alg_bytesize
315                 - n.length, n.length);
316         HEXDUMP("ModInv result", cookie->output_array[0],
317                         alg_bytesize);
318         return RTE_CRYPTO_OP_STATUS_SUCCESS;
319 }
320
321 static int
322 rsa_set_pub_input(struct rte_crypto_asym_op *asym_op,
323                 struct icp_qat_fw_pke_request *qat_req,
324                 struct qat_asym_op_cookie *cookie,
325                 struct rte_crypto_asym_xform *xform)
326 {
327         struct qat_asym_function qat_function;
328         uint32_t alg_bytesize, func_id;
329         int status = 0;
330
331         qat_function = get_rsa_enc_function(xform);
332         func_id = qat_function.func_id;
333         if (func_id == 0) {
334                 QAT_LOG(ERR, "Cannot obtain functionality id");
335                 return -EINVAL;
336         }
337         alg_bytesize = qat_function.bytesize;
338
339         if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
340                 switch (asym_op->rsa.pad) {
341                 case RTE_CRYPTO_RSA_PADDING_NONE:
342                         SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
343                                         alg_bytesize, 0);
344                         break;
345                 default:
346                         QAT_LOG(ERR,
347                                 "Invalid RSA padding (Encryption)"
348                                 );
349                         return -EINVAL;
350                 }
351                 HEXDUMP("RSA Message", cookie->input_array[0], alg_bytesize);
352         } else {
353                 switch (asym_op->rsa.pad) {
354                 case RTE_CRYPTO_RSA_PADDING_NONE:
355                         SET_PKE_LN(cookie->input_array, asym_op->rsa.sign,
356                                         alg_bytesize, 0);
357                         break;
358                 default:
359                         QAT_LOG(ERR,
360                                 "Invalid RSA padding (Verify)");
361                         return -EINVAL;
362                 }
363                 HEXDUMP("RSA Signature", cookie->input_array[0],
364                                 alg_bytesize);
365         }
366
367         SET_PKE_LN(cookie->input_array, xform->rsa.e,
368                         alg_bytesize, 1);
369         SET_PKE_LN(cookie->input_array, xform->rsa.n,
370                         alg_bytesize, 2);
371
372         cookie->alg_bytesize = alg_bytesize;
373         qat_req->pke_hdr.cd_pars.func_id = func_id;
374
375         HEXDUMP("RSA Public Key", cookie->input_array[1], alg_bytesize);
376         HEXDUMP("RSA Modulus", cookie->input_array[2], alg_bytesize);
377
378         return status;
379 }
380
/*
 * Build the PKE input for an RSA private-key operation (decrypt or sign).
 * Supports both key representations:
 *  - CRT (quintuple): lines 1..5 hold p, q, dP, dQ, qInv, each half the
 *    algorithm byte size; line 0 holds the data.
 *  - Plain exponent: lines 1..2 hold d and n, full size.
 * Only PADDING_NONE is accepted for the data line.
 *
 * Returns 0 on success, -EINVAL on unknown key type, unsupported padding,
 * or unknown firmware function id.
 */
static int
rsa_set_priv_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT) {
		qat_function = get_rsa_crt_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;
		qat_req->input_param_count =
				QAT_ASYM_RSA_QT_NUM_IN_PARAMS;

		/* CRT key parts occupy half-size lines 1..5. */
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.p,
			(alg_bytesize >> 1), 1);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.q,
			(alg_bytesize >> 1), 2);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.dP,
			(alg_bytesize >> 1), 3);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.dQ,
			(alg_bytesize >> 1), 4);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.qInv,
			(alg_bytesize >> 1), 5);

		/* NOTE(review): these dumps print alg_bytesize bytes although
		 * only alg_bytesize/2 were populated above — confirm whether
		 * half-size dumps were intended.
		 */
		HEXDUMP("RSA p", cookie->input_array[1],
				alg_bytesize);
		HEXDUMP("RSA q", cookie->input_array[2],
				alg_bytesize);
		HEXDUMP("RSA dP", cookie->input_array[3],
				alg_bytesize);
		HEXDUMP("RSA dQ", cookie->input_array[4],
				alg_bytesize);
		HEXDUMP("RSA qInv", cookie->input_array[5],
				alg_bytesize);
	} else if (xform->rsa.key_type ==
			RTE_RSA_KEY_TYPE_EXP) {
		qat_function = get_rsa_dec_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;

		/* Plain private key: d in line 1, modulus n in line 2. */
		SET_PKE_LN(cookie->input_array, xform->rsa.d,
			alg_bytesize, 1);
		SET_PKE_LN(cookie->input_array, xform->rsa.n,
			alg_bytesize, 2);

		HEXDUMP("RSA d", cookie->input_array[1],
				alg_bytesize);
		HEXDUMP("RSA n", cookie->input_array[2],
				alg_bytesize);
	} else {
		QAT_LOG(ERR, "Invalid RSA key type");
		return -EINVAL;
	}

	/* Line 0 carries the operation data: ciphertext for decrypt,
	 * plaintext for sign. Other op types leave line 0 untouched.
	 */
	if (asym_op->rsa.op_type ==
			RTE_CRYPTO_ASYM_OP_DECRYPT) {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.cipher,
				alg_bytesize, 0);
			HEXDUMP("RSA ciphertext", cookie->input_array[0],
				alg_bytesize);
			break;
		default:
			QAT_LOG(ERR,
				"Invalid padding of RSA (Decrypt)");
			return -(EINVAL);
		}

	} else if (asym_op->rsa.op_type ==
			RTE_CRYPTO_ASYM_OP_SIGN) {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
				alg_bytesize, 0);
			HEXDUMP("RSA text to be signed", cookie->input_array[0],
				alg_bytesize);
			break;
		default:
			QAT_LOG(ERR,
				"Invalid padding of RSA (Signature)");
			return -(EINVAL);
		}
	}

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;
	return status;
}
482
483 static int
484 rsa_set_input(struct rte_crypto_asym_op *asym_op,
485                 struct icp_qat_fw_pke_request *qat_req,
486                 struct qat_asym_op_cookie *cookie,
487                 struct rte_crypto_asym_xform *xform)
488 {
489         qat_req->input_param_count =
490                         QAT_ASYM_RSA_NUM_IN_PARAMS;
491         qat_req->output_param_count =
492                         QAT_ASYM_RSA_NUM_OUT_PARAMS;
493
494         if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
495                         asym_op->rsa.op_type ==
496                                 RTE_CRYPTO_ASYM_OP_VERIFY) {
497                 return rsa_set_pub_input(asym_op, qat_req, cookie, xform);
498         } else {
499                 return rsa_set_priv_input(asym_op, qat_req, cookie, xform);
500         }
501 }
502
/*
 * Copy the RSA result from the device output back into the user's op:
 *  - encrypt  -> rsa.cipher.data
 *  - verify   -> rsa.cipher.data (the recovered signature block)
 *  - decrypt  -> rsa.message.data
 *  - sign     -> rsa.sign.data
 * Verify and decrypt additionally reject any padding other than
 * PADDING_NONE. Always copies cookie->alg_bytesize bytes.
 */
static uint8_t
rsa_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie)
{
	uint32_t alg_bytesize = cookie->alg_bytesize;

	if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
		asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {

		if (asym_op->rsa.op_type ==
				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
			uint8_t *rsa_result = asym_op->rsa.cipher.data;

			rte_memcpy(rsa_result,
					cookie->output_array[0],
					alg_bytesize);
			HEXDUMP("RSA Encrypted data", cookie->output_array[0],
				alg_bytesize);
		} else {
			/* NOTE(review): verify writes into rsa.cipher.data —
			 * presumably so the caller can compare the recovered
			 * block against the expected message; confirm against
			 * the completion/verify logic elsewhere in the driver.
			 */
			uint8_t *rsa_result = asym_op->rsa.cipher.data;

			switch (asym_op->rsa.pad) {
			case RTE_CRYPTO_RSA_PADDING_NONE:
				rte_memcpy(rsa_result,
						cookie->output_array[0],
						alg_bytesize);
				HEXDUMP("RSA signature",
					cookie->output_array[0],
					alg_bytesize);
				break;
			default:
				QAT_LOG(ERR, "Padding not supported");
				return RTE_CRYPTO_OP_STATUS_ERROR;
			}
		}
	} else {
		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
			uint8_t *rsa_result = asym_op->rsa.message.data;

			switch (asym_op->rsa.pad) {
			case RTE_CRYPTO_RSA_PADDING_NONE:
				rte_memcpy(rsa_result,
					cookie->output_array[0],
					alg_bytesize);
				HEXDUMP("RSA Decrypted Message",
					cookie->output_array[0],
					alg_bytesize);
				break;
			default:
				QAT_LOG(ERR, "Padding not supported");
				return RTE_CRYPTO_OP_STATUS_ERROR;
			}
		} else {
			uint8_t *rsa_result = asym_op->rsa.sign.data;

			rte_memcpy(rsa_result,
					cookie->output_array[0],
					alg_bytesize);
			HEXDUMP("RSA Signature", cookie->output_array[0],
				alg_bytesize);
		}
	}
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}
567
/*
 * Build the PKE input for ECDSA sign or verify. The flat input_buffer is
 * filled slot-by-slot (stride = alg_bytesize rounded up to 8) in the exact
 * order the firmware function expects; slot indices below are therefore
 * load-bearing and must not be reordered.
 *
 * Returns 0 on success, -EINVAL for an unsupported curve or missing
 * firmware function id, -1 for an unknown op type.
 */
static int
ecdsa_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, qat_alg_bytesize, func_id;
	int curve_id;

	curve_id = pick_curve(xform);
	if (curve_id < 0) {
		QAT_LOG(ERR, "Incorrect elliptic curve");
		return -EINVAL;
	}

	switch (asym_op->ecdsa.op_type) {
	case RTE_CRYPTO_ASYM_OP_SIGN:
		qat_function = get_ecdsa_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;
		qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);

		/* Sign inputs: private key, digest, per-sign k, then the
		 * curve constants b, a, p, n and base point (y, x).
		 */
		SET_PKE_LN_9A_F(asym_op->ecdsa.pkey, 0);
		SET_PKE_LN_9A_F(asym_op->ecdsa.message, 1);
		SET_PKE_LN_9A_F(asym_op->ecdsa.k, 2);
		SET_PKE_LN_EC_F(curve[curve_id].b, alg_bytesize, 3);
		SET_PKE_LN_EC_F(curve[curve_id].a, alg_bytesize, 4);
		SET_PKE_LN_EC_F(curve[curve_id].p, alg_bytesize, 5);
		SET_PKE_LN_EC_F(curve[curve_id].n, alg_bytesize, 6);
		SET_PKE_LN_EC_F(curve[curve_id].y, alg_bytesize, 7);
		SET_PKE_LN_EC_F(curve[curve_id].x, alg_bytesize, 8);

		cookie->alg_bytesize = alg_bytesize;
		qat_req->pke_hdr.cd_pars.func_id = func_id;
		qat_req->input_param_count =
				QAT_ASYM_ECDSA_RS_SIGN_IN_PARAMS;
		qat_req->output_param_count =
				QAT_ASYM_ECDSA_RS_SIGN_OUT_PARAMS;

		/* NOTE(review): dump labels don't all match the slots
		 * written above (e.g. slot 5 holds p but is labelled "n",
		 * and slot 8 is never dumped) — debug-only, but confirm.
		 */
		HEXDUMP_OFF_F("ECDSA d", 0);
		HEXDUMP_OFF_F("ECDSA e", 1);
		HEXDUMP_OFF_F("ECDSA k", 2);
		HEXDUMP_OFF_F("ECDSA b", 3);
		HEXDUMP_OFF_F("ECDSA a", 4);
		HEXDUMP_OFF_F("ECDSA n", 5);
		HEXDUMP_OFF_F("ECDSA y", 6);
		HEXDUMP_OFF_F("ECDSA x", 7);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		qat_function = get_ecdsa_verify_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;
		qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);

		/* Verify inputs, filled from the highest slot down:
		 * digest, signature (s, r), curve order and base point,
		 * public key Q, then curve constants a, b, p.
		 */
		SET_PKE_LN_9A_F(asym_op->ecdsa.message, 10);
		SET_PKE_LN_9A_F(asym_op->ecdsa.s, 9);
		SET_PKE_LN_9A_F(asym_op->ecdsa.r, 8);
		SET_PKE_LN_EC_F(curve[curve_id].n, alg_bytesize, 7);
		SET_PKE_LN_EC_F(curve[curve_id].x, alg_bytesize, 6);
		SET_PKE_LN_EC_F(curve[curve_id].y, alg_bytesize, 5);
		SET_PKE_LN_9A_F(asym_op->ecdsa.q.x, 4);
		SET_PKE_LN_9A_F(asym_op->ecdsa.q.y, 3);
		SET_PKE_LN_EC_F(curve[curve_id].a, alg_bytesize, 2);
		SET_PKE_LN_EC_F(curve[curve_id].b, alg_bytesize, 1);
		SET_PKE_LN_EC_F(curve[curve_id].p, alg_bytesize, 0);

		cookie->alg_bytesize = alg_bytesize;
		qat_req->pke_hdr.cd_pars.func_id = func_id;
		qat_req->input_param_count =
				QAT_ASYM_ECDSA_RS_VERIFY_IN_PARAMS;
		qat_req->output_param_count =
				QAT_ASYM_ECDSA_RS_VERIFY_OUT_PARAMS;

		/* NOTE(review): dump labels appear reversed relative to the
		 * slots written above (slot 0 holds p but is labelled "e") —
		 * debug-only, but confirm the intended ordering.
		 */
		HEXDUMP_OFF_F("e", 0);
		HEXDUMP_OFF_F("s", 1);
		HEXDUMP_OFF_F("r", 2);
		HEXDUMP_OFF_F("n", 3);
		HEXDUMP_OFF_F("xG", 4);
		HEXDUMP_OFF_F("yG", 5);
		HEXDUMP_OFF_F("xQ", 6);
		HEXDUMP_OFF_F("yQ", 7);
		HEXDUMP_OFF_F("a", 8);
		HEXDUMP_OFF_F("b", 9);
		HEXDUMP_OFF_F("q", 10);
		break;
	default:
		return -1;
	}

	return 0;
}
668
669 static uint8_t
670 ecdsa_collect(struct rte_crypto_asym_op *asym_op,
671                 struct qat_asym_op_cookie *cookie)
672 {
673         uint32_t alg_bytesize = RTE_ALIGN_CEIL(cookie->alg_bytesize, 8);
674
675         if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
676                 uint8_t *r = asym_op->ecdsa.r.data;
677                 uint8_t *s = asym_op->ecdsa.s.data;
678
679                 asym_op->ecdsa.r.length = alg_bytesize;
680                 asym_op->ecdsa.s.length = alg_bytesize;
681                 rte_memcpy(r, cookie->output_array[0], alg_bytesize);
682                 rte_memcpy(s, cookie->output_array[1], alg_bytesize);
683                 HEXDUMP("R", cookie->output_array[0],
684                         alg_bytesize);
685                 HEXDUMP("S", cookie->output_array[1],
686                         alg_bytesize);
687         }
688         return RTE_CRYPTO_OP_STATUS_SUCCESS;
689 }
690
691 static int
692 ecpm_set_input(struct rte_crypto_asym_op *asym_op,
693                 struct icp_qat_fw_pke_request *qat_req,
694                 struct qat_asym_op_cookie *cookie,
695                 struct rte_crypto_asym_xform *xform)
696 {
697         struct qat_asym_function qat_function;
698         uint32_t alg_bytesize, __rte_unused qat_alg_bytesize, func_id;
699         int curve_id;
700
701         curve_id = pick_curve(xform);
702         if (curve_id < 0) {
703                 QAT_LOG(ERR, "Incorrect elliptic curve");
704                 return -EINVAL;
705         }
706
707         qat_function = get_ecpm_function(xform);
708         func_id = qat_function.func_id;
709         if (func_id == 0) {
710                 QAT_LOG(ERR, "Cannot obtain functionality id");
711                 return -EINVAL;
712         }
713         alg_bytesize = qat_function.bytesize;
714         qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);
715
716         SET_PKE_LN(cookie->input_array, asym_op->ecpm.scalar,
717                         alg_bytesize, 0);
718         SET_PKE_LN(cookie->input_array, asym_op->ecpm.p.x,
719                         alg_bytesize, 1);
720         SET_PKE_LN(cookie->input_array, asym_op->ecpm.p.y,
721                         alg_bytesize, 2);
722         SET_PKE_LN_EC(cookie->input_array, curve[SECP256R1].a,
723                         alg_bytesize, 3);
724         SET_PKE_LN_EC(cookie->input_array, curve[SECP256R1].b,
725                         alg_bytesize, 4);
726         SET_PKE_LN_EC(cookie->input_array, curve[SECP256R1].p,
727                         alg_bytesize, 5);
728         SET_PKE_LN_EC(cookie->input_array, curve[SECP256R1].h,
729                         alg_bytesize, 6);
730
731         cookie->alg_bytesize = alg_bytesize;
732         qat_req->pke_hdr.cd_pars.func_id = func_id;
733         qat_req->input_param_count =
734                         QAT_ASYM_ECPM_IN_PARAMS;
735         qat_req->output_param_count =
736                         QAT_ASYM_ECPM_OUT_PARAMS;
737
738         HEXDUMP("k", cookie->input_array[0], qat_alg_bytesize);
739         HEXDUMP("xG", cookie->input_array[1], qat_alg_bytesize);
740         HEXDUMP("yG", cookie->input_array[2], qat_alg_bytesize);
741         HEXDUMP("a", cookie->input_array[3], qat_alg_bytesize);
742         HEXDUMP("b", cookie->input_array[4], qat_alg_bytesize);
743         HEXDUMP("q", cookie->input_array[5], qat_alg_bytesize);
744         HEXDUMP("h", cookie->input_array[6], qat_alg_bytesize);
745
746         return 0;
747 }
748
749 static uint8_t
750 ecpm_collect(struct rte_crypto_asym_op *asym_op,
751                 struct qat_asym_op_cookie *cookie)
752 {
753         uint8_t *r = asym_op->ecpm.r.x.data;
754         uint8_t *s = asym_op->ecpm.r.y.data;
755         uint32_t alg_bytesize = cookie->alg_bytesize;
756
757         asym_op->ecpm.r.x.length = alg_bytesize;
758         asym_op->ecpm.r.y.length = alg_bytesize;
759         rte_memcpy(r, cookie->output_array[0], alg_bytesize);
760         rte_memcpy(s, cookie->output_array[1], alg_bytesize);
761
762         HEXDUMP("rX", cookie->output_array[0],
763                 alg_bytesize);
764         HEXDUMP("rY", cookie->output_array[1],
765                 alg_bytesize);
766         return RTE_CRYPTO_OP_STATUS_SUCCESS;
767 }
768
769 static int
770 asym_set_input(struct rte_crypto_asym_op *asym_op,
771                 struct icp_qat_fw_pke_request *qat_req,
772                 struct qat_asym_op_cookie *cookie,
773                 struct rte_crypto_asym_xform *xform)
774 {
775         switch (xform->xform_type) {
776         case RTE_CRYPTO_ASYM_XFORM_MODEX:
777                 return modexp_set_input(asym_op, qat_req,
778                                 cookie, xform);
779         case RTE_CRYPTO_ASYM_XFORM_MODINV:
780                 return modinv_set_input(asym_op, qat_req,
781                                 cookie, xform);
782         case RTE_CRYPTO_ASYM_XFORM_RSA:
783                 return rsa_set_input(asym_op, qat_req,
784                                 cookie, xform);
785         case RTE_CRYPTO_ASYM_XFORM_ECDSA:
786                 return ecdsa_set_input(asym_op, qat_req,
787                                 cookie, xform);
788         case RTE_CRYPTO_ASYM_XFORM_ECPM:
789                 return ecpm_set_input(asym_op, qat_req,
790                                 cookie, xform);
791         default:
792                 QAT_LOG(ERR, "Invalid/unsupported asymmetric crypto xform");
793                 return -EINVAL;
794         }
795         return 1;
796 }
797
/*
 * Build one PKE firmware request message (out_msg) from a crypto op.
 *
 * Only sessionless operation is supported; any other session type is
 * rejected.  On any failure the message is converted into a NULL
 * request (service_type = ICP_QAT_FW_COMN_REQ_NULL with zero
 * parameter counts) so the firmware passes it through untouched and
 * the error is reported at dequeue time via the cookie.
 *
 * Always returns 0: errors are carried in op->status / cookie->error
 * rather than failing the enqueue.
 */
static int
qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
			__rte_unused uint64_t *opaque,
			__rte_unused enum qat_device_gen qat_dev_gen)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct icp_qat_fw_pke_request *qat_req =
			(struct icp_qat_fw_pke_request *)out_msg;
	struct qat_asym_op_cookie *cookie =
			(struct qat_asym_op_cookie *)op_cookie;
	int err = 0;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		QAT_LOG(ERR,
			"QAT asymmetric crypto PMD does not support session"
			);
		goto error;
	case RTE_CRYPTO_OP_SESSIONLESS:
		request_init(qat_req);
		err = asym_set_input(asym_op, qat_req, cookie,
				op->asym->xform);
		if (err) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			goto error;
		}
		break;
	default:
		QAT_DP_LOG(ERR, "Invalid session/xform settings");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		goto error;
	}

	/* opaque carries the op pointer back through the response ring. */
	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	qat_req->pke_mid.src_data_addr = cookie->input_addr;
	qat_req->pke_mid.dest_data_addr = cookie->output_addr;

	HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));

	return 0;
error:
	/* Neutralize the message: NULL service with no parameters. */
	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));
	qat_req->output_param_count = 0;
	qat_req->input_param_count = 0;
	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
	/* NOTE(review): err is negative (-errno) here; OR-ing it into
	 * cookie->error makes it non-zero, which is all the dequeue path
	 * checks - confirm the field is not interpreted as a bitmask.
	 */
	cookie->error |= err;

	return 0;
}
850
851 static uint8_t
852 qat_asym_collect_response(struct rte_crypto_op *rx_op,
853                 struct qat_asym_op_cookie *cookie,
854                 struct rte_crypto_asym_xform *xform)
855 {
856         struct rte_crypto_asym_op *asym_op = rx_op->asym;
857
858         switch (xform->xform_type) {
859         case RTE_CRYPTO_ASYM_XFORM_MODEX:
860                 return modexp_collect(asym_op, cookie, xform);
861         case RTE_CRYPTO_ASYM_XFORM_MODINV:
862                 return modinv_collect(asym_op, cookie, xform);
863         case RTE_CRYPTO_ASYM_XFORM_RSA:
864                 return rsa_collect(asym_op, cookie);
865         case RTE_CRYPTO_ASYM_XFORM_ECDSA:
866                 return ecdsa_collect(asym_op, cookie);
867         case RTE_CRYPTO_ASYM_XFORM_ECPM:
868                 return ecpm_collect(asym_op, cookie);
869         default:
870                 QAT_LOG(ERR, "Not supported xform type");
871                 return  RTE_CRYPTO_OP_STATUS_ERROR;
872         }
873 }
874
/*
 * Process one PKE response message from the device.
 *
 * Error precedence: a cookie error recorded at build time wins first;
 * otherwise the firmware PKE status flag and the common error code are
 * both checked.  Only if the op is still NOT_PROCESSED after those
 * checks is the algorithm output collected into the user's op.
 *
 * Stores the completed op into *op and returns 1 (one message
 * consumed).
 */
static int
qat_asym_process_response(void **op, uint8_t *resp,
		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_pke_resp *resp_msg =
			(struct icp_qat_fw_pke_resp *)resp;
	/* The op pointer was stashed in the request's opaque field. */
	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque);
	struct qat_asym_op_cookie *cookie = op_cookie;

	if (cookie->error) {
		/* Clear so the cookie can be reused for the next op. */
		cookie->error = 0;
		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		QAT_DP_LOG(ERR, "Cookie status returned error");
	} else {
		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric response status"
					" returned error");
		}
		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric common status"
					" returned error");
		}
	}
	if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
		rx_op->status = qat_asym_collect_response(rx_op,
					cookie, rx_op->asym->xform);
		cleanup(cookie, rx_op->asym->xform,
					cookie->alg_bytesize);
	}

	*op = rx_op;
	HEXDUMP("resp_msg:", resp_msg, sizeof(struct icp_qat_fw_pke_resp));

	return 1;
}
917
/* Session configure stub: sessions are not supported by this PMD,
 * always fails with -ENOTSUP.
 */
int
qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
		struct rte_crypto_asym_xform *xform __rte_unused,
		struct rte_cryptodev_asym_session *sess __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
	return -ENOTSUP;
}
926
/* Session private-size stub: no session support, so zero bytes. */
unsigned int
qat_asym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
	return 0;
}
933
/* Session clear stub: nothing to free since sessions are unsupported. */
void
qat_asym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_asym_session *sess __rte_unused)
{
	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
}
940
/* Cryptodev enqueue hook: delegates to the generic QAT ring enqueue,
 * using qat_asym_build_request to turn each op into a PKE message.
 * Returns the number of ops actually enqueued.
 */
static uint16_t
qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
			nb_ops);
}
948
/* Cryptodev dequeue hook: delegates to the generic QAT ring dequeue,
 * using qat_asym_process_response to unpack each PKE response.
 * Returns the number of ops dequeued.
 */
static uint16_t
qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
				nb_ops);
}
956
957 void
958 qat_asym_init_op_cookie(void *op_cookie)
959 {
960         int j;
961         struct qat_asym_op_cookie *cookie = op_cookie;
962
963         cookie->input_addr = rte_mempool_virt2iova(cookie) +
964                         offsetof(struct qat_asym_op_cookie,
965                                         input_params_ptrs);
966
967         cookie->output_addr = rte_mempool_virt2iova(cookie) +
968                         offsetof(struct qat_asym_op_cookie,
969                                         output_params_ptrs);
970
971         for (j = 0; j < 8; j++) {
972                 cookie->input_params_ptrs[j] =
973                                 rte_mempool_virt2iova(cookie) +
974                                 offsetof(struct qat_asym_op_cookie,
975                                                 input_array[j]);
976                 cookie->output_params_ptrs[j] =
977                                 rte_mempool_virt2iova(cookie) +
978                                 offsetof(struct qat_asym_op_cookie,
979                                                 output_array[j]);
980         }
981 }
982
/*
 * Create the asymmetric cryptodev instance for one QAT PCI device.
 *
 * Registers a sub-device named "<pci_name>_asym", wires up the
 * generation-specific ops and the enqueue/dequeue burst functions,
 * and (in the primary process only) publishes the capability table in
 * a shared memzone and applies command-line parameters.
 *
 * Returns 0 on success, -EFAULT on generation/driver-id mismatch or
 * memzone failure, -ENODEV if cryptodev creation fails.
 */
int
qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
		struct qat_dev_cmd_param *qat_dev_cmd_param)
{
	struct qat_cryptodev_private *internals;
	struct rte_cryptodev *cryptodev;
	struct qat_device_info *qat_dev_instance =
		&qat_pci_devs[qat_pci_dev->qat_dev_id];
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
		.private_data_size = sizeof(struct qat_cryptodev_private)
	};
	struct qat_capabilities_info capa_info;
	const struct rte_cryptodev_capabilities *capabilities;
	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint64_t capa_size;
	int i = 0;

	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "asym");
	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);

	/* A generation without asym cryptodev_ops has no asym service. */
	if (gen_dev_ops->cryptodev_ops == NULL) {
		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
				name);
		return -(EFAULT);
	}

	/* Primary publishes its driver id; secondaries must match it so
	 * shared structures are interpreted identically across processes.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		qat_pci_dev->qat_asym_driver_id =
				qat_asym_driver_id;
	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (qat_pci_dev->qat_asym_driver_id !=
				qat_asym_driver_id) {
			QAT_LOG(ERR,
				"Device %s have different driver id than corresponding device in primary process",
				name);
			return -(EFAULT);
		}
	}

	/* Populate subset device to use in cryptodev device creation */
	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
	qat_dev_instance->asym_rte_dev.numa_node =
			qat_dev_instance->pci_dev->device.numa_node;
	qat_dev_instance->asym_rte_dev.devargs = NULL;

	cryptodev = rte_cryptodev_pmd_create(name,
			&(qat_dev_instance->asym_rte_dev), &init_params);

	if (cryptodev == NULL)
		return -ENODEV;

	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
	cryptodev->driver_id = qat_asym_driver_id;
	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;

	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;

	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);

	/* Everything below (capability memzone, devargs) is owned by the
	 * primary process; secondaries attach to the existing state.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"QAT_ASYM_CAPA_GEN_%d",
			qat_pci_dev->qat_dev_gen);

	internals = cryptodev->data->dev_private;
	internals->qat_dev = qat_pci_dev;
	internals->dev_id = cryptodev->data->dev_id;

	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
	capabilities = capa_info.data;
	capa_size = capa_info.size;

	/* Memzone is shared per generation; reuse it if already present. */
	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				capa_size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities, "
				"destroying PMD for %s",
				name);
			rte_cryptodev_pmd_destroy(cryptodev);
			memset(&qat_dev_instance->asym_rte_dev, 0,
				sizeof(qat_dev_instance->asym_rte_dev));
			return -EFAULT;
		}
	}

	memcpy(internals->capa_mz->addr, capabilities, capa_size);
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	/* Apply recognized command-line parameters (NULL name terminates). */
	while (1) {
		if (qat_dev_cmd_param[i].name == NULL)
			break;
		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
			internals->min_enq_burst_threshold =
					qat_dev_cmd_param[i].val;
		i++;
	}

	qat_pci_dev->asym_dev = internals;
	internals->service_type = QAT_SERVICE_ASYMMETRIC;
	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
			cryptodev->data->name, internals->dev_id);
	return 0;
}
1098
1099 int
1100 qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
1101 {
1102         struct rte_cryptodev *cryptodev;
1103
1104         if (qat_pci_dev == NULL)
1105                 return -ENODEV;
1106         if (qat_pci_dev->asym_dev == NULL)
1107                 return 0;
1108         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1109                 rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
1110
1111         /* free crypto device */
1112         cryptodev = rte_cryptodev_pmd_get_dev(
1113                         qat_pci_dev->asym_dev->dev_id);
1114         rte_cryptodev_pmd_destroy(cryptodev);
1115         qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
1116         qat_pci_dev->asym_dev = NULL;
1117
1118         return 0;
1119 }
1120
/* Register the asym PMD with the cryptodev framework; this assigns
 * qat_asym_driver_id, which is compared across processes in
 * qat_asym_dev_create().
 */
static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
		cryptodev_qat_asym_driver,
		qat_asym_driver_id);