crypto/qat: handle Single Pass Crypto Requests on GEN3
author    Adam Dybkowski <adamx.dybkowski@intel.com>
          Tue, 8 Oct 2019 12:44:33 +0000 (14:44 +0200)
committer Akhil Goyal <akhil.goyal@nxp.com>
          Wed, 9 Oct 2019 09:50:12 +0000 (11:50 +0200)
This patch improves the performance of AES GCM by using
the Single Pass Crypto Request functionality when running
on GEN3 QAT. It falls back to the classic 2-pass mode on
older hardware.

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>
doc/guides/cryptodevs/qat.rst
doc/guides/rel_notes/release_19_11.rst
drivers/crypto/qat/qat_sym.c
drivers/crypto/qat/qat_sym_session.c
drivers/crypto/qat/qat_sym_session.h

index e905f6d..ad685a7 100644 (file)
@@ -82,6 +82,17 @@ Limitations
 * ZUC EEA3/EIA3 is not supported by dh895xcc devices
 * Maximum additional authenticated data (AAD) for GCM is 240 bytes long and must be passed to the device in a buffer rounded up to the nearest block-size multiple (x16) and padded with zeros.
 * Queue pairs are not thread-safe (that is, within a single queue pair, RX and TX from different lcores is not supported).
+* A GCM limitation exists, but only when there are multiple generations of
+  QAT devices on a single platform.
+  To optimise performance, the GCM crypto session should be initialised for
+  the device generation to which the ops will be enqueued. Specifically, if a
+  GCM session is initialised on a GEN2 device but then attached to an op
+  enqueued to a GEN3 device, it will work but cannot take advantage of the
+  hardware optimisations in the GEN3 device. If a GCM session is initialised
+  on a GEN3 device and then attached to an op sent to a GEN1/GEN2 device, it
+  will not be enqueued to the device and will be marked as failed. The
+  simplest mitigation is to use the BDF whitelist to avoid mixing devices of
+  different generations in the same process when planning to use them for GCM.
 
 Extra notes on KASUMI F9
 ~~~~~~~~~~~~~~~~~~~~~~~~
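As a concrete form of the mitigation described in the qat.rst limitation
above, the sketch below (not part of this patch; the device id, xform and
mempools are assumed to be set up by the application) initialises the GCM
session directly on the GEN3 device that will process the ops, using the
standard DPDK cryptodev API:

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_mempool.h>

/* gen3_dev_id, the xform and both mempools are assumed to be set up
 * elsewhere by the application. */
static struct rte_cryptodev_sym_session *
create_gcm_session_on_gen3(uint8_t gen3_dev_id,
		struct rte_crypto_sym_xform *aead_xform,
		struct rte_mempool *sess_mp,
		struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

	if (sess == NULL)
		return NULL;
	/* Initialising on the GEN3 device lets the PMD select the
	 * Single-Pass path; ops carrying this session must then be
	 * enqueued to a device of the same generation. */
	if (rte_cryptodev_sym_session_init(gen3_dev_id, sess,
			aead_xform, sess_priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}
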
index e3f8817..8049144 100644 (file)
@@ -67,6 +67,13 @@ New Features
   Support is added for all sequence number sizes for control and user plane.
   Test application is updated for unit testing.
 
+* **Enabled Single Pass GCM acceleration on QAT GEN3.**
+
+  Added support for Single Pass GCM, available on QAT GEN3 only (Intel
+  QuickAssist Technology C4xxx). It is automatically chosen instead of the
+  classic 2-pass mode when running on QAT GEN3, significantly improving
+  the performance of AES GCM operations.
+
 * **Updated the Intel QuickAssist Technology (QAT) compression PMD.**
 
   Added stateful decompression support in the Intel QuickAssist Technology PMD.
index 46ef27a..5ff4aa1 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -12,6 +12,7 @@
 
 #include "qat_sym.h"
 
+
 /** Decrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -195,7 +196,8 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
        rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
        qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
        cipher_param = (void *)&qat_req->serv_specif_rqpars;
-       auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+       auth_param = (void *)((uint8_t *)cipher_param +
+                       ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
 
        if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
                        ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
@@ -593,6 +595,13 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                qat_req->comn_mid.dest_data_addr = dst_buf_start;
        }
 
+       /* Handle Single-Pass GCM */
+       if (ctx->is_single_pass) {
+               cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
+               cipher_param->spc_auth_res_addr =
+                               op->sym->aead.digest.phys_addr;
+       }
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
                        sizeof(struct icp_qat_fw_la_bulk_req));
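For context on the Single-Pass branch above: it picks up the AAD and digest
physical addresses straight from the op. A minimal, hypothetical sketch of
how an application would populate those fields; the buffers must sit in
DMA-capable memory, e.g. from rte_malloc(), so the IOVAs below are valid:

#include <rte_crypto.h>
#include <rte_malloc.h>

/* aad_buf and digest_buf are hypothetical buffers allocated with
 * rte_malloc() so their IOVAs resolve to valid DMA addresses. */
static void
fill_gcm_op(struct rte_crypto_op *op, uint32_t plaintext_len,
		uint8_t *aad_buf, uint8_t *digest_buf)
{
	op->sym->aead.data.offset = 0;
	op->sym->aead.data.length = plaintext_len;
	op->sym->aead.aad.data = aad_buf;
	op->sym->aead.aad.phys_addr = rte_malloc_virt2iova(aad_buf);
	op->sym->aead.digest.data = digest_buf;
	op->sym->aead.digest.phys_addr = rte_malloc_virt2iova(digest_buf);
}
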
index e5167b3..72290ba 100644 (file)
@@ -450,7 +450,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                break;
        case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-                       ret = qat_sym_session_configure_aead(xform,
+                       ret = qat_sym_session_configure_aead(dev, xform,
                                        session);
                        if (ret < 0)
                                return ret;
@@ -467,7 +467,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                break;
        case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-                       ret = qat_sym_session_configure_aead(xform,
+                       ret = qat_sym_session_configure_aead(dev, xform,
                                        session);
                        if (ret < 0)
                                return ret;
@@ -503,6 +503,73 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
        return 0;
 }
 
+static int
+qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
+               struct qat_sym_session *session,
+               struct rte_crypto_aead_xform *aead_xform)
+{
+       enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+
+       if (qat_dev_gen == QAT_GEN3 &&
+                       aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
+               /* Use faster Single-Pass GCM */
+               struct icp_qat_fw_la_cipher_req_params *cipher_param =
+                               (void *) &session->fw_req.serv_specif_rqpars;
+
+               session->is_single_pass = 1;
+               session->min_qat_dev_gen = QAT_GEN3;
+               session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+               session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
+               session->cipher_iv.offset = aead_xform->iv.offset;
+               session->cipher_iv.length = aead_xform->iv.length;
+               if (qat_sym_session_aead_create_cd_cipher(session,
+                               aead_xform->key.data, aead_xform->key.length))
+                       return -EINVAL;
+               session->aad_len = aead_xform->aad_length;
+               session->digest_length = aead_xform->digest_length;
+               if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+                       session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+                       session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+                       ICP_QAT_FW_LA_RET_AUTH_SET(
+                               session->fw_req.comn_hdr.serv_specif_flags,
+                               ICP_QAT_FW_LA_RET_AUTH_RES);
+               } else {
+                       session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+                       session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+                       ICP_QAT_FW_LA_CMP_AUTH_SET(
+                               session->fw_req.comn_hdr.serv_specif_flags,
+                               ICP_QAT_FW_LA_CMP_AUTH_RES);
+               }
+               ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+                               session->fw_req.comn_hdr.serv_specif_flags,
+                               ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+               ICP_QAT_FW_LA_PROTO_SET(
+                               session->fw_req.comn_hdr.serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+               ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+                               session->fw_req.comn_hdr.serv_specif_flags,
+                               ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+               session->fw_req.comn_hdr.service_cmd_id =
+                               ICP_QAT_FW_LA_CMD_CIPHER;
+               session->cd.cipher.cipher_config.val =
+                               ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+                                       ICP_QAT_HW_CIPHER_AEAD_MODE,
+                                       session->qat_cipher_alg,
+                                       ICP_QAT_HW_CIPHER_NO_CONVERT,
+                                       session->qat_dir);
+               QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
+                               aead_xform->digest_length,
+                               QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+                               QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+               session->cd.cipher.cipher_config.reserved =
+                               ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+                                       aead_xform->aad_length);
+               cipher_param->spc_aad_sz = aead_xform->aad_length;
+               cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+       }
+       return 0;
+}
+
 int
 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
                                struct rte_crypto_sym_xform *xform,
@@ -646,7 +713,8 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 }
 
 int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+                               struct rte_crypto_sym_xform *xform,
                                struct qat_sym_session *session)
 {
        struct rte_crypto_aead_xform *aead_xform = &xform->aead;
@@ -684,6 +752,17 @@ qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
                return -EINVAL;
        }
 
+       session->is_single_pass = 0;
+       if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+               /* Use faster Single-Pass GCM if possible */
+               int res = qat_sym_session_handle_single_pass(
+                               dev->data->dev_private, session, aead_xform);
+               if (res < 0)
+                       return res;
+               if (session->is_single_pass)
+                       return 0;
+       }
+
        if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
                        aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
                        (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1444,7 +1523,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
-               sizeof(struct icp_qat_fw_la_cipher_req_params));
+               ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
        uint16_t state1_size = 0, state2_size = 0;
        uint16_t hash_offset, cd_size;
        uint32_t *aad_len = NULL;
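qat_sym_session_handle_single_pass() above switches to Single-Pass only for
a 96-bit (12-byte) IV on GEN3. A minimal sketch, with a hypothetical key
buffer and IV offset, of an AEAD xform that satisfies that condition:

#include <rte_crypto.h>

/* IV_OFFSET and the key contents are illustrative; the 12-byte IV
 * length is what matches QAT_AES_GCM_SPC_IV_SIZE in the check above. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

static uint8_t aes_key[32]; /* filled with real key material elsewhere */

static struct rte_crypto_sym_xform gcm_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = aes_key, .length = sizeof(aes_key) },
		.iv = { .offset = IV_OFFSET, .length = 12 }, /* 96-bit */
		.digest_length = 16,
		.aad_length = 16,
	},
};
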
index ce1ca5a..98985d6 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -25,6 +25,9 @@
 #define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
 #define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */
 
+/* 96-bit case of IV for CCM/GCM single pass algorithm */
+#define QAT_AES_GCM_SPC_IV_SIZE 12
+
 
 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
@@ -78,6 +81,7 @@ struct qat_sym_session {
        rte_spinlock_t lock;    /* protects this struct */
        enum qat_device_gen min_qat_dev_gen;
        uint8_t aes_cmac;
+       uint8_t is_single_pass;
 };
 
 int
@@ -91,7 +95,8 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform, void *session_private);
 
 int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+                               struct rte_crypto_sym_xform *xform,
                                struct qat_sym_session *session);
 
 int