/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 - 2022 Intel Corporation
 */
#include <stdarg.h>

#include <cryptodev_pmd.h>

#include "icp_qat_fw_pke.h"
#include "icp_qat_fw.h"
#include "qat_pke_functionality_arrays.h"

#include "qat_device.h"
uint8_t qat_asym_driver_id;

struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
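/*
 * Pre-compute the IOVA addresses of the flat input/output buffers and of the
 * per-parameter pointer tables held in each op cookie, so no address
 * translation is needed on the datapath.
 */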
void
qat_asym_init_op_cookie(void *op_cookie)
{
	int j;
	struct qat_asym_op_cookie *cookie = op_cookie;

	cookie->input_addr = rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_asym_op_cookie,
					input_params_ptrs);

	cookie->output_addr = rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_asym_op_cookie,
					output_params_ptrs);

	for (j = 0; j < 8; j++) {
		cookie->input_params_ptrs[j] =
				rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_asym_op_cookie,
						input_array[j]);
		cookie->output_params_ptrs[j] =
				rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_asym_op_cookie,
						output_array[j]);
	}
}
/* An rte_driver is needed in the registration of both the device and the driver
 * with cryptodev.
 * The actual qat pci's rte_driver can't be used as its name represents
 * the whole pci device with all services. Think of this as a holder for a name
 * for the crypto part of the pci device.
 */
static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
static const struct rte_driver cryptodev_qat_asym_driver = {
	.name = qat_asym_drv_name,
	.alias = qat_asym_drv_name
};
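/*
 * Scrub the cookie's input and output buffers after a response has been
 * collected so no operand or result data lingers in the mempool element.
 */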
static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
		int in_count, int out_count, int alg_size)
{
	int i;

	for (i = 0; i < in_count; i++)
		memset(cookie->input_array[i], 0x0, alg_size);
	for (i = 0; i < out_count; i++)
		memset(cookie->output_array[i], 0x0, alg_size);
}
static void qat_clear_arrays_crt(struct qat_asym_op_cookie *cookie,
		int alg_size)
{
	int i;

	memset(cookie->input_array[0], 0x0, alg_size);
	for (i = 1; i < QAT_ASYM_RSA_QT_NUM_IN_PARAMS; i++)
		memset(cookie->input_array[i], 0x0, alg_size / 2);
	for (i = 0; i < QAT_ASYM_RSA_NUM_OUT_PARAMS; i++)
		memset(cookie->output_array[i], 0x0, alg_size);
}
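/*
 * Pick the scrub routine that matches the xform: plain input/output counts
 * for modexp, modinv and RSA with a full exponent key, or the CRT layout
 * (half-size p, q, dP, dQ, qInv buffers) for an RSA quintuple key.
 */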
static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform, int alg_size)
{
	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX)
		qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, alg_size);
	else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV)
		qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
				QAT_ASYM_MODINV_NUM_OUT_PARAMS, alg_size);
	else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
		if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT)
			qat_clear_arrays_crt(cookie, alg_size);
		else
			qat_clear_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS,
					QAT_ASYM_RSA_NUM_OUT_PARAMS, alg_size);
	}
}
#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
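/*
 * Look up the firmware function id for the smallest supported operand size
 * that can hold *size bits; returns 0 on a match and non-zero when the
 * requested size is not supported by the table.
 */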
static int
qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
		size_t arr_sz, size_t *size, uint32_t *func_id)
{
	size_t i;

	for (i = 0; i < arr_sz; i++) {
		if (*size <= arr[i][0]) {
			*size = arr[i][0];
			*func_id = arr[i][1];
			return 0;
		}
	}
	return -1;
}
/* Return the largest of the n size_t arguments that follow the count. */
static size_t max_of(int n, ...)
{
	va_list args;
	size_t len = 0, num;
	int i;

	va_start(args, n);
	len = va_arg(args, size_t);

	for (i = 0; i < n - 1; i++) {
		num = va_arg(args, size_t);
		if (num > len)
			len = num;
	}
	va_end(args);

	return len;
}
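/*
 * Reject parameters that are entirely zero (for example a zeroed modulus),
 * returning -EINVAL; any value with at least one non-zero byte is accepted.
 */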
static int
qat_asym_check_nonzero(rte_crypto_param n)
{
	if (n.length < 8) {
		/* Not a case for any cryptographic function except for the DH
		 * generator, which can often be only one byte long
		 */
		size_t i;

		if (n.data[n.length - 1] == 0x0) {
			for (i = 0; i < n.length - 1; i++)
				if (n.data[i] != 0x0)
					break;
			if (i == n.length - 1)
				return -(EINVAL);
		}
	} else if (*(uint64_t *)&n.data[
				n.length - 8] == 0) {
		/* Very likely it is a zeroed modulus */
		size_t i;

		for (i = 0; i < n.length - 8; i++)
			if (n.data[i] != 0x0)
				break;
		if (i == n.length - 8)
			return -(EINVAL);
	}

	return 0;
}
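/*
 * Translate the crypto op and xform into the firmware PKE request: pick the
 * function id for the operand size, copy each big-endian operand into the
 * cookie's flat input buffers (right-aligned, zero-padded on the left) and
 * set the input/output parameter counts.
 */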
static int
qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	int err = 0;
	size_t alg_size;
	size_t alg_size_in_bytes;
	uint32_t func_id = 0;

	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
		err = qat_asym_check_nonzero(xform->modex.modulus);
		if (err) {
			QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
					" aborting this operation");
			return err;
		}

		alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
				xform->modex.exponent.length,
				xform->modex.modulus.length);
		alg_size = alg_size_in_bytes << 3;

		if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
				sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
				&alg_size, &func_id)) {
			return -(EINVAL);
		}

		alg_size_in_bytes = alg_size >> 3;
		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
				asym_op->modex.base.length,
				asym_op->modex.base.data,
				asym_op->modex.base.length);
		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
				xform->modex.exponent.length,
				xform->modex.exponent.data,
				xform->modex.exponent.length);
		rte_memcpy(cookie->input_array[2] + alg_size_in_bytes -
				xform->modex.modulus.length,
				xform->modex.modulus.data,
				xform->modex.modulus.length);
		cookie->alg_size = alg_size;
		qat_req->pke_hdr.cd_pars.func_id = func_id;
		qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
		qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
				cookie->input_array[0],
				alg_size_in_bytes);
		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
				cookie->input_array[1],
				alg_size_in_bytes);
		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp modulus",
				cookie->input_array[2],
				alg_size_in_bytes);
#endif
	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
		err = qat_asym_check_nonzero(xform->modinv.modulus);
		if (err) {
			QAT_LOG(ERR, "Empty modulus in modular multiplicative"
					" inverse, aborting this operation");
			return err;
		}

		alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
				xform->modinv.modulus.length);
		alg_size = alg_size_in_bytes << 3;

		if (xform->modinv.modulus.data[
				xform->modinv.modulus.length - 1] & 0x01) {
			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
					sizeof(MOD_INV_IDS_ODD)/
					sizeof(*MOD_INV_IDS_ODD),
					&alg_size, &func_id)) {
				return -(EINVAL);
			}
		} else {
			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
					sizeof(MOD_INV_IDS_EVEN)/
					sizeof(*MOD_INV_IDS_EVEN),
					&alg_size, &func_id)) {
				return -(EINVAL);
			}
		}

		alg_size_in_bytes = alg_size >> 3;
		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
				asym_op->modinv.base.length,
				asym_op->modinv.base.data,
				asym_op->modinv.base.length);
		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
				xform->modinv.modulus.length,
				xform->modinv.modulus.data,
				xform->modinv.modulus.length);
		cookie->alg_size = alg_size;
		qat_req->pke_hdr.cd_pars.func_id = func_id;
		qat_req->input_param_count =
				QAT_ASYM_MODINV_NUM_IN_PARAMS;
		qat_req->output_param_count =
				QAT_ASYM_MODINV_NUM_OUT_PARAMS;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
				cookie->input_array[0],
				alg_size_in_bytes);
		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
				cookie->input_array[1],
				alg_size_in_bytes);
#endif
	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
		err = qat_asym_check_nonzero(xform->rsa.n);
		if (err) {
			QAT_LOG(ERR, "Empty modulus in RSA,"
					" aborting this operation");
			return err;
		}

		alg_size_in_bytes = xform->rsa.n.length;
		alg_size = alg_size_in_bytes << 3;

		qat_req->input_param_count =
				QAT_ASYM_RSA_NUM_IN_PARAMS;
		qat_req->output_param_count =
				QAT_ASYM_RSA_NUM_OUT_PARAMS;

		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
				asym_op->rsa.op_type ==
						RTE_CRYPTO_ASYM_OP_VERIFY) {

			if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
					sizeof(RSA_ENC_IDS)/
					sizeof(*RSA_ENC_IDS),
					&alg_size, &func_id)) {
				QAT_LOG(ERR,
					"Not supported RSA parameter size (key)");
				return -(EINVAL);
			}
			alg_size_in_bytes = alg_size >> 3;
			if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
				switch (asym_op->rsa.pad) {
				case RTE_CRYPTO_RSA_PADDING_NONE:
					rte_memcpy(cookie->input_array[0] +
						alg_size_in_bytes -
						asym_op->rsa.message.length,
						asym_op->rsa.message.data,
						asym_op->rsa.message.length);
					break;
				default:
					QAT_LOG(ERR,
						"Invalid RSA padding (Encryption)");
					return -(EINVAL);
				}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
						cookie->input_array[0],
						alg_size_in_bytes);
#endif
			} else {
				switch (asym_op->rsa.pad) {
				case RTE_CRYPTO_RSA_PADDING_NONE:
					rte_memcpy(cookie->input_array[0],
						asym_op->rsa.sign.data,
						asym_op->rsa.sign.length);
					break;
				default:
					QAT_LOG(ERR,
						"Invalid RSA padding (Verify)");
					return -(EINVAL);
				}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
						cookie->input_array[0],
						alg_size_in_bytes);
#endif
			}
			rte_memcpy(cookie->input_array[1] +
					alg_size_in_bytes -
					xform->rsa.e.length,
					xform->rsa.e.data,
					xform->rsa.e.length);
			rte_memcpy(cookie->input_array[2] +
					alg_size_in_bytes -
					xform->rsa.n.length,
					xform->rsa.n.data,
					xform->rsa.n.length);

			cookie->alg_size = alg_size;
			qat_req->pke_hdr.cd_pars.func_id = func_id;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
					cookie->input_array[1], alg_size_in_bytes);
			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
					cookie->input_array[2], alg_size_in_bytes);
#endif
		} else {
			if (asym_op->rsa.op_type ==
					RTE_CRYPTO_ASYM_OP_DECRYPT) {
				switch (asym_op->rsa.pad) {
				case RTE_CRYPTO_RSA_PADDING_NONE:
					rte_memcpy(cookie->input_array[0]
						+ alg_size_in_bytes -
						asym_op->rsa.cipher.length,
						asym_op->rsa.cipher.data,
						asym_op->rsa.cipher.length);
					break;
				default:
					QAT_LOG(ERR,
						"Invalid padding of RSA (Decrypt)");
					return -(EINVAL);
				}

			} else if (asym_op->rsa.op_type ==
					RTE_CRYPTO_ASYM_OP_SIGN) {
				switch (asym_op->rsa.pad) {
				case RTE_CRYPTO_RSA_PADDING_NONE:
					rte_memcpy(cookie->input_array[0]
						+ alg_size_in_bytes -
						asym_op->rsa.message.length,
						asym_op->rsa.message.data,
						asym_op->rsa.message.length);
					break;
				default:
					QAT_LOG(ERR,
						"Invalid padding of RSA (Signature)");
					return -(EINVAL);
				}
			}
			if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT) {

				qat_req->input_param_count =
						QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
				if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS,
						sizeof(RSA_DEC_CRT_IDS)/
						sizeof(*RSA_DEC_CRT_IDS),
						&alg_size, &func_id)) {
					return -(EINVAL);
				}
				alg_size_in_bytes = alg_size >> 3;

				rte_memcpy(cookie->input_array[1] +
						(alg_size_in_bytes >> 1) -
						xform->rsa.qt.p.length,
						xform->rsa.qt.p.data,
						xform->rsa.qt.p.length);
				rte_memcpy(cookie->input_array[2] +
						(alg_size_in_bytes >> 1) -
						xform->rsa.qt.q.length,
						xform->rsa.qt.q.data,
						xform->rsa.qt.q.length);
				rte_memcpy(cookie->input_array[3] +
						(alg_size_in_bytes >> 1) -
						xform->rsa.qt.dP.length,
						xform->rsa.qt.dP.data,
						xform->rsa.qt.dP.length);
				rte_memcpy(cookie->input_array[4] +
						(alg_size_in_bytes >> 1) -
						xform->rsa.qt.dQ.length,
						xform->rsa.qt.dQ.data,
						xform->rsa.qt.dQ.length);
				rte_memcpy(cookie->input_array[5] +
						(alg_size_in_bytes >> 1) -
						xform->rsa.qt.qInv.length,
						xform->rsa.qt.qInv.data,
						xform->rsa.qt.qInv.length);
				cookie->alg_size = alg_size;
				qat_req->pke_hdr.cd_pars.func_id = func_id;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
				QAT_DP_HEXDUMP_LOG(DEBUG, "C",
						cookie->input_array[0],
						alg_size_in_bytes);
				QAT_DP_HEXDUMP_LOG(DEBUG, "p",
						cookie->input_array[1],
						alg_size_in_bytes >> 1);
				QAT_DP_HEXDUMP_LOG(DEBUG, "q",
						cookie->input_array[2],
						alg_size_in_bytes >> 1);
				QAT_DP_HEXDUMP_LOG(DEBUG,
						"dP", cookie->input_array[3],
						alg_size_in_bytes >> 1);
				QAT_DP_HEXDUMP_LOG(DEBUG,
						"dQ", cookie->input_array[4],
						alg_size_in_bytes >> 1);
				QAT_DP_HEXDUMP_LOG(DEBUG,
						"qInv", cookie->input_array[5],
						alg_size_in_bytes >> 1);
#endif
			} else if (xform->rsa.key_type ==
					RTE_RSA_KEY_TYPE_EXP) {
				if (qat_asym_get_sz_and_func_id(
						RSA_DEC_IDS,
						sizeof(RSA_DEC_IDS)/
						sizeof(*RSA_DEC_IDS),
						&alg_size, &func_id)) {
					return -(EINVAL);
				}
				alg_size_in_bytes = alg_size >> 3;
				rte_memcpy(cookie->input_array[1] +
						alg_size_in_bytes -
						xform->rsa.d.length,
						xform->rsa.d.data,
						xform->rsa.d.length);
				rte_memcpy(cookie->input_array[2] +
						alg_size_in_bytes -
						xform->rsa.n.length,
						xform->rsa.n.data,
						xform->rsa.n.length);
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext",
						cookie->input_array[0],
						alg_size_in_bytes);
				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d", cookie->input_array[1],
						alg_size_in_bytes);
				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n", cookie->input_array[2],
						alg_size_in_bytes);
#endif
				cookie->alg_size = alg_size;
				qat_req->pke_hdr.cd_pars.func_id = func_id;
			} else {
				QAT_LOG(ERR, "Invalid RSA key type");
				return -(EINVAL);
			}
		}
	} else {
		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
		return -(EINVAL);
	}

	return 0;
}
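/*
 * Datapath enqueue callback: copy the session's request template (or build a
 * sessionless one), fill the PKE parameters from the op and point the
 * firmware descriptor at the cookie's input/output pointer tables. On error
 * the descriptor is downgraded to a NULL request and the failure is recorded
 * in the cookie.
 */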
static __rte_always_inline int
qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
		__rte_unused uint64_t *opaque,
		__rte_unused enum qat_device_gen dev_gen)
{
	struct qat_asym_session *ctx;
	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct icp_qat_fw_pke_request *qat_req =
			(struct icp_qat_fw_pke_request *)out_msg;
	struct qat_asym_op_cookie *cookie =
			(struct qat_asym_op_cookie *)op_cookie;
	int err = 0;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		ctx = (struct qat_asym_session *)
				op->asym->session->sess_private_data;
		if (unlikely(ctx == NULL)) {
			QAT_LOG(ERR, "Session has not been created for this device");
			goto error;
		}
		rte_mov64((uint8_t *)qat_req, (const uint8_t *)&(ctx->req_tmpl));
		err = qat_asym_fill_arrays(asym_op, qat_req, cookie, ctx->xform);
		if (err) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			goto error;
		}
	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		qat_fill_req_tmpl(qat_req);
		err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
				op->asym->xform);
		if (err) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			goto error;
		}
	} else {
		QAT_DP_LOG(ERR, "Invalid session/xform settings");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		goto error;
	}

	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	qat_req->pke_mid.src_data_addr = cookie->input_addr;
	qat_req->pke_mid.dest_data_addr = cookie->output_addr;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_pke_request));
#endif

	return 0;
error:
	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_pke_request));
#endif
	qat_req->output_param_count = 0;
	qat_req->input_param_count = 0;
	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
	cookie->error |= err;

	return 0;
}
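/*
 * Copy the firmware's output buffers back into the locations supplied by the
 * application (right-aligned for modexp/modinv results) and clear the cookie
 * buffers afterwards.
 */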
static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	size_t alg_size, alg_size_in_bytes = 0;
	struct rte_crypto_asym_op *asym_op = rx_op->asym;

	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
		rte_crypto_param n = xform->modex.modulus;

		alg_size = cookie->alg_size;
		alg_size_in_bytes = alg_size >> 3;
		uint8_t *modexp_result = asym_op->modex.result.data;

		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
			rte_memcpy(modexp_result +
				(asym_op->modex.result.length -
					n.length),
				cookie->output_array[0] + alg_size_in_bytes
				- n.length, n.length);
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
					cookie->output_array[0],
					alg_size_in_bytes);
#endif
		}
	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
		rte_crypto_param n = xform->modinv.modulus;

		alg_size = cookie->alg_size;
		alg_size_in_bytes = alg_size >> 3;
		uint8_t *modinv_result = asym_op->modinv.result.data;

		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
			rte_memcpy(modinv_result + (asym_op->modinv.result.length
				- n.length),
				cookie->output_array[0] + alg_size_in_bytes
				- n.length, n.length);
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
					cookie->output_array[0],
					alg_size_in_bytes);
#endif
		}
	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {

		alg_size = cookie->alg_size;
		alg_size_in_bytes = alg_size >> 3;
		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
				asym_op->rsa.op_type ==
					RTE_CRYPTO_ASYM_OP_VERIFY) {
			if (asym_op->rsa.op_type ==
					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
				uint8_t *rsa_result = asym_op->rsa.cipher.data;

				rte_memcpy(rsa_result,
						cookie->output_array[0],
						alg_size_in_bytes);
				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
						cookie->output_array[0],
						alg_size_in_bytes);
#endif
			} else if (asym_op->rsa.op_type ==
					RTE_CRYPTO_ASYM_OP_VERIFY) {
				uint8_t *rsa_result = asym_op->rsa.cipher.data;

				switch (asym_op->rsa.pad) {
				case RTE_CRYPTO_RSA_PADDING_NONE:
					rte_memcpy(rsa_result,
						cookie->output_array[0],
						alg_size_in_bytes);
					rx_op->status =
						RTE_CRYPTO_OP_STATUS_SUCCESS;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
					QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
							cookie->output_array[0],
							alg_size_in_bytes);
#endif
					break;
				default:
					QAT_LOG(ERR, "Padding not supported");
					rx_op->status =
						RTE_CRYPTO_OP_STATUS_ERROR;
					break;
				}
			}
		} else {
			if (asym_op->rsa.op_type ==
					RTE_CRYPTO_ASYM_OP_DECRYPT) {
				uint8_t *rsa_result = asym_op->rsa.message.data;

				switch (asym_op->rsa.pad) {
				case RTE_CRYPTO_RSA_PADDING_NONE:
					rte_memcpy(rsa_result,
						cookie->output_array[0],
						alg_size_in_bytes);
					rx_op->status =
						RTE_CRYPTO_OP_STATUS_SUCCESS;
					break;
				default:
					QAT_LOG(ERR, "Padding not supported");
					rx_op->status =
						RTE_CRYPTO_OP_STATUS_ERROR;
					break;
				}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
						rsa_result, alg_size_in_bytes);
#endif
			} else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
				uint8_t *rsa_result = asym_op->rsa.sign.data;

				rte_memcpy(rsa_result,
						cookie->output_array[0],
						alg_size_in_bytes);
				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
						cookie->output_array[0],
						alg_size_in_bytes);
#endif
			}
		}
	}
	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
}
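/*
 * Dequeue callback: map the firmware response back to its rte_crypto_op,
 * turn PKE/common error flags into an error op status, and otherwise copy
 * the results out through qat_asym_collect_response().
 */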
static int
qat_asym_process_response(void **op, uint8_t *resp,
		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
{
	struct qat_asym_session *ctx;
	struct icp_qat_fw_pke_resp *resp_msg =
			(struct icp_qat_fw_pke_resp *)resp;
	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque);
	struct qat_asym_op_cookie *cookie = op_cookie;

	if (cookie->error) {
		cookie->error = 0;
		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		QAT_DP_LOG(ERR, "Cookie status returned error");
	} else {
		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric response status"
					" returned error");
		}
		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric common status"
					" returned error");
		}
	}

	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		ctx = (struct qat_asym_session *)
				rx_op->asym->session->sess_private_data;
		qat_asym_collect_response(rx_op, cookie, ctx->xform);
	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
	}
	*op = (void *)rx_op;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
			sizeof(struct icp_qat_fw_pke_resp));
#endif

	return 1;
}
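/*
 * Validate the xform parameters for the requested algorithm, store the xform
 * pointer in the session private data and pre-build the firmware request
 * template used on the datapath.
 */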
int
qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
		struct rte_crypto_asym_xform *xform,
		struct rte_cryptodev_asym_session *sess)
{
	struct qat_asym_session *session;

	session = (struct qat_asym_session *) sess->sess_private_data;
	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
		if (xform->modex.exponent.length == 0 ||
				xform->modex.modulus.length == 0) {
			QAT_LOG(ERR, "Invalid mod exp input parameter");
			return -EINVAL;
		}
	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
		if (xform->modinv.modulus.length == 0) {
			QAT_LOG(ERR, "Invalid mod inv input parameter");
			return -EINVAL;
		}
	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
		if (xform->rsa.n.length == 0) {
			QAT_LOG(ERR, "Invalid rsa input parameter");
			return -EINVAL;
		}
	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
		return -EINVAL;
	} else {
		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
		return -ENOTSUP;
	}

	session->xform = xform;
	qat_asym_build_req_tmpl(session);

	return 0;
}
unsigned int qat_asym_session_get_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
}
void
qat_asym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_asym_session *sess)
{
	void *sess_priv = sess->sess_private_data;
	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;

	if (sess_priv)
		memset(s, 0, qat_asym_session_get_private_size(dev));
}
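/*
 * Thin wrappers binding the generic QAT queue-pair enqueue/dequeue engine to
 * the asymmetric build-request and process-response callbacks.
 */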
uint16_t
qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
			nb_ops);
}

uint16_t
qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
			nb_ops);
}
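/*
 * Create the asym cryptodev instance for one QAT PCI device: record the
 * driver id, populate the holder rte_device, create the cryptodev, publish
 * the generation-specific capabilities through a shared memzone and apply
 * any devargs thresholds.
 */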
int
qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
		struct qat_dev_cmd_param *qat_dev_cmd_param)
{
	struct qat_cryptodev_private *internals;
	struct rte_cryptodev *cryptodev;
	struct qat_device_info *qat_dev_instance =
			&qat_pci_devs[qat_pci_dev->qat_dev_id];
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
		.private_data_size = sizeof(struct qat_cryptodev_private)
	};
	struct qat_capabilities_info capa_info;
	const struct rte_cryptodev_capabilities *capabilities;
	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint64_t capa_size;
	int i = 0;

	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "asym");
	QAT_LOG(DEBUG, "Creating QAT ASYM device %s", name);

	if (gen_dev_ops->cryptodev_ops == NULL) {
		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
				name);
		return -(EFAULT);
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		qat_pci_dev->qat_asym_driver_id =
				qat_asym_driver_id;
	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (qat_pci_dev->qat_asym_driver_id !=
				qat_asym_driver_id) {
			QAT_LOG(ERR,
				"Device %s has a different driver id than the corresponding device in the primary process",
				name);
			return -(EFAULT);
		}
	}
	/* Populate subset device to use in cryptodev device creation */
	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
	qat_dev_instance->asym_rte_dev.numa_node =
			qat_dev_instance->pci_dev->device.numa_node;
	qat_dev_instance->asym_rte_dev.devargs = NULL;

	cryptodev = rte_cryptodev_pmd_create(name,
			&(qat_dev_instance->asym_rte_dev), &init_params);

	if (cryptodev == NULL)
		return -ENODEV;

	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
	cryptodev->driver_id = qat_asym_driver_id;
	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;

	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;

	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"QAT_ASYM_CAPA_GEN_%d",
			qat_pci_dev->qat_dev_gen);
	internals = cryptodev->data->dev_private;
	internals->qat_dev = qat_pci_dev;
	internals->dev_id = cryptodev->data->dev_id;

	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
	capabilities = capa_info.data;
	capa_size = capa_info.size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				capa_size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(ERR,
				"Error allocating memzone for capabilities, "
				"destroying PMD for %s",
				name);
			rte_cryptodev_pmd_destroy(cryptodev);
			memset(&qat_dev_instance->asym_rte_dev, 0,
				sizeof(qat_dev_instance->asym_rte_dev));
			return -EFAULT;
		}
	}

	memcpy(internals->capa_mz->addr, capabilities, capa_size);
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	while (1) {
		if (qat_dev_cmd_param[i].name == NULL)
			break;
		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
			internals->min_enq_burst_threshold =
					qat_dev_cmd_param[i].val;
		i++;
	}

	qat_pci_dev->asym_dev = internals;
	internals->service_type = QAT_SERVICE_ASYMMETRIC;
	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
			cryptodev->data->name, internals->dev_id);
	return 0;
}
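/*
 * Tear down the asym cryptodev instance for a QAT PCI device and release the
 * capability memzone (primary process only).
 */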
int
qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct rte_cryptodev *cryptodev;

	if (qat_pci_dev == NULL)
		return -ENODEV;
	if (qat_pci_dev->asym_dev == NULL)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);

	/* free crypto device */
	cryptodev = rte_cryptodev_pmd_get_dev(
			qat_pci_dev->asym_dev->dev_id);
	rte_cryptodev_pmd_destroy(cryptodev);
	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
	qat_pci_dev->asym_dev = NULL;

	return 0;
}
static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
		cryptodev_qat_asym_driver,
		qat_asym_driver_id);