crypto/qat: rework session functions
Author:     Kai Ji <kai.ji@intel.com>
AuthorDate: Wed, 23 Feb 2022 00:50:00 +0000 (08:50 +0800)
Commit:     Akhil Goyal <gakhil@marvell.com>
CommitDate: Wed, 23 Feb 2022 08:59:16 +0000 (09:59 +0100)
This patch introduces a per-generation set_session method for
the QAT PMD. In addition, reuse of a QAT session across device
generations is prohibited, as support for min_qat_dev_gen is
removed.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
drivers/crypto/qat/qat_crypto.h
drivers/crypto/qat/qat_sym.c
drivers/crypto/qat/qat_sym_pmd.c
drivers/crypto/qat/qat_sym_session.c
drivers/crypto/qat/qat_sym_session.h
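
A note on the mechanism before the per-file changes: the rework replaces
the per-op min_qat_dev_gen check with a per-generation set_session hook in
the qat_crypto_gen_dev_ops table, resolved once at session-setup time. A
minimal, self-contained C sketch of that dispatch pattern follows; the
enum values and table shape mirror the patch, while the printf scaffolding
and main() are illustrative assumptions, not driver code.

#include <stdio.h>

enum qat_device_gen { QAT_GEN1, QAT_GEN2, QAT_GEN3, QAT_GEN4, QAT_N_GENS };

typedef int (*set_session_t)(void *cryptodev, void *session);

struct gen_dev_ops {
	set_session_t set_session;
};

static struct gen_dev_ops gen_dev_ops[QAT_N_GENS];

static int set_session_gen1(void *cdev, void *session)
{
	(void)cdev; (void)session;
	printf("gen1 set_session: base build-request selection\n");
	return 0;
}

static int set_session_gen3(void *cdev, void *session)
{
	/* A later generation layers its fix-ups over the gen1 result. */
	int ret = set_session_gen1(cdev, session);

	printf("gen3 set_session: single-pass overrides\n");
	return ret;
}

int main(void)
{
	gen_dev_ops[QAT_GEN1].set_session = set_session_gen1;
	gen_dev_ops[QAT_GEN3].set_session = set_session_gen3;

	enum qat_device_gen gen = QAT_GEN3; /* from the probed device */

	return gen_dev_ops[gen].set_session(NULL, NULL);
}
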

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21..01a897a 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
        return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+               void *session __rte_unused)
+{
+       return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
        qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440..64e6ae6 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
        return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+               uint8_t hash_flag)
+{
+       struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+       struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+                       (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+                       session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+       /* Set the Use Extended Protocol Flags bit in LW 1 */
+       QAT_FIELD_SET(header->comn_req_flags,
+                       QAT_COMN_EXT_FLAGS_USED,
+                       QAT_COMN_EXT_FLAGS_BITPOS,
+                       QAT_COMN_EXT_FLAGS_MASK);
+
+       /* Set Hash Flags in LW 28 */
+       cd_ctrl->hash_flags |= hash_flag;
+
+       /* Set proto flags in LW 1 */
+       switch (session->qat_cipher_alg) {
+       case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_SNOW_3G_PROTO);
+               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+                               header->serv_specif_flags, 0);
+               break;
+       case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+                               header->serv_specif_flags,
+                               ICP_QAT_FW_LA_ZUC_3G_PROTO);
+               break;
+       default:
+               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+                               header->serv_specif_flags, 0);
+               break;
+       }
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+       struct rte_cryptodev *dev = cdev;
+       struct qat_sym_session *ctx = session;
+       const struct qat_cryptodev_private *qat_private =
+                       dev->data->dev_private;
+       int ret;
+
+       ret = qat_sym_crypto_set_session_gen1(cdev, session);
+       if (ret == -ENOTSUP) {
+               /* GEN1 returns -ENOTSUP for mixed algorithms it cannot
+                * handle; some of those are unsupported on GEN2 as well,
+                * so check the device capabilities here.
+                */
+               if ((qat_private->internal_capabilities &
+                               QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+                       return -ENOTSUP;
+
+               if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+                               ctx->qat_cipher_alg !=
+                               ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx,
+                               1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+               } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+                               ctx->qat_cipher_alg !=
+                               ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx,
+                               1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+               } else if ((ctx->aes_cmac ||
+                               ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+                               (ctx->qat_cipher_alg ==
+                               ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+                               ctx->qat_cipher_alg ==
+                               ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+               }
+
+               ret = 0;
+       }
+
+       return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
        /* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
        qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
        qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
                        qat_sym_crypto_cap_get_gen2;
+       qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+                       qat_sym_crypto_set_session_gen2;
        qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
                        qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
                        qat_asym_crypto_cap_get_gen1;
        qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
                        qat_asym_crypto_feature_flags_get_gen1;
+       qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+                       qat_asym_crypto_set_session_gen1;
 }
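
The three mixed-algorithm branches in qat_sym_crypto_set_session_gen2()
reduce to a small classification: which extended hash flag, if any, the
session needs. A standalone model of that decision is below; the enum
values and bit positions are invented stand-ins, since the real constants
live in the ICP firmware headers.

#include <stdio.h>

/* Illustrative stand-ins for the ICP_QAT_HW_* algorithm enums. */
enum hash_alg { AUTH_NULL, AUTH_SNOW3G_UIA2, AUTH_ZUC_EIA3, AUTH_OTHER };
enum ciph_alg { CIPH_SNOW3G_UEA2, CIPH_ZUC_EEA3, CIPH_OTHER };

/* Returns the hash_flag argument for the ext-hash-flags helper,
 * or -1 when no extended flags are needed at all. */
static int mixed_hash_flag(enum hash_alg h, enum ciph_alg c, int aes_cmac)
{
	if (h == AUTH_ZUC_EIA3 && c != CIPH_ZUC_EEA3)
		return 1 << 2;	/* stand-in for the ZUC EIA3 bit position */
	if (h == AUTH_SNOW3G_UIA2 && c != CIPH_SNOW3G_UEA2)
		return 1 << 1;	/* stand-in for the SNOW3G UIA2 bit position */
	if ((aes_cmac || h == AUTH_NULL) &&
			(c == CIPH_SNOW3G_UEA2 || c == CIPH_ZUC_EEA3))
		return 0;	/* ext flags set, but no hash flag bits */
	return -1;
}

int main(void)
{
	printf("%d\n", mixed_hash_flag(AUTH_ZUC_EIA3, CIPH_SNOW3G_UEA2, 0));
	printf("%d\n", mixed_hash_flag(AUTH_NULL, CIPH_ZUC_EEA3, 0));
	printf("%d\n", mixed_hash_flag(AUTH_OTHER, CIPH_OTHER, 0));
	return 0;
}
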
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf..db864d9 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,257 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
        return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+       struct icp_qat_fw_la_bulk_req *req,
+       struct rte_crypto_va_iova_ptr *iv,
+       struct rte_crypto_va_iova_ptr *digest,
+       struct rte_crypto_va_iova_ptr *aad,
+       union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+       if (ctx->is_single_pass) {
+               struct icp_qat_fw_la_cipher_req_params *cipher_param =
+                       (void *)&req->serv_specif_rqpars;
+
+               /* QAT GEN3 uses single pass to treat AEAD as a
+                * cipher operation
+                */
+
+               qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+               cipher_param->cipher_offset = ofs.ofs.cipher.head;
+               cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+                               ofs.ofs.cipher.tail;
+
+               cipher_param->spc_aad_addr = aad->iova;
+               cipher_param->spc_auth_res_addr = digest->iova;
+
+               return;
+       }
+
+       enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+       struct qat_sym_op_cookie *cookie,
+       struct icp_qat_fw_la_bulk_req *req,
+       struct rte_crypto_va_iova_ptr *digest,
+       struct rte_crypto_va_iova_ptr *auth_iv,
+       union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       uint32_t ver_key_offset;
+       uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+                       ofs.ofs.auth.tail;
+
+       if (!ctx->is_single_pass_gmac ||
+                       (auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+               enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+                               data_len);
+               return;
+       }
+
+       cipher_cd_ctrl = (void *) &req->cd_ctrl;
+       cipher_param = (void *)&req->serv_specif_rqpars;
+       ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+                       ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+                       ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+                       ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+                       sizeof(struct icp_qat_hw_cipher_config);
+
+       if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+               ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+               /* AES-GMAC */
+               qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+                               req);
+       }
+
+       /* Fill separate Content Descriptor for this op */
+       rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+                       ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+                               ctx->cd.cipher.key :
+                               RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+                       ctx->auth_key_length);
+       cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+                       ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+                               ICP_QAT_HW_CIPHER_AEAD_MODE,
+                               ctx->qat_cipher_alg,
+                               ICP_QAT_HW_CIPHER_NO_CONVERT,
+                               (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+                                       ICP_QAT_HW_CIPHER_ENCRYPT :
+                                       ICP_QAT_HW_CIPHER_DECRYPT));
+       QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+                       ctx->digest_length,
+                       QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+                       QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+       cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+                       ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+       /* Update the request */
+       req->cd_pars.u.s.content_desc_addr =
+                       cookie->opt.spc_gmac.cd_phys_addr;
+       req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+                       sizeof(struct icp_qat_hw_cipher_config) +
+                       ctx->auth_key_length, 8) >> 3;
+       req->comn_mid.src_length = data_len;
+       req->comn_mid.dst_length = 0;
+
+       cipher_param->spc_aad_addr = 0;
+       cipher_param->spc_auth_res_addr = digest->iova;
+       cipher_param->spc_aad_sz = auth_data_len;
+       cipher_param->reserved = 0;
+       cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+       req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+       cipher_cd_ctrl->cipher_cfg_offset = 0;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+       ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+                       req->comn_hdr.serv_specif_flags,
+                       ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+       ICP_QAT_FW_LA_PROTO_SET(
+                       req->comn_hdr.serv_specif_flags,
+                       ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+               uint8_t *out_msg, void *op_cookie)
+{
+       register struct icp_qat_fw_la_bulk_req *req;
+       struct rte_crypto_op *op = in_op;
+       struct qat_sym_op_cookie *cookie = op_cookie;
+       struct rte_crypto_sgl in_sgl, out_sgl;
+       struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+                       out_vec[QAT_SYM_SGL_MAX_NUMBER];
+       struct rte_crypto_va_iova_ptr cipher_iv;
+       struct rte_crypto_va_iova_ptr aad;
+       struct rte_crypto_va_iova_ptr digest;
+       union rte_crypto_sym_ofs ofs;
+       int32_t total_len;
+
+       in_sgl.vec = in_vec;
+       out_sgl.vec = out_vec;
+
+       req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+       rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+       ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+                       &cipher_iv, &aad, &digest);
+       if (unlikely(ofs.raw == UINT64_MAX)) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return -EINVAL;
+       }
+
+       total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+                       in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+       if (unlikely(total_len < 0)) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return -EINVAL;
+       }
+
+       enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+               total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+                       NULL, &aad, &digest);
+#endif
+
+       return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+               uint8_t *out_msg, void *op_cookie)
+{
+       register struct icp_qat_fw_la_bulk_req *req;
+       struct rte_crypto_op *op = in_op;
+       struct qat_sym_op_cookie *cookie = op_cookie;
+       struct rte_crypto_sgl in_sgl, out_sgl;
+       struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+                       out_vec[QAT_SYM_SGL_MAX_NUMBER];
+       struct rte_crypto_va_iova_ptr auth_iv;
+       struct rte_crypto_va_iova_ptr digest;
+       union rte_crypto_sym_ofs ofs;
+       int32_t total_len;
+
+       in_sgl.vec = in_vec;
+       out_sgl.vec = out_vec;
+
+       req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+       rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+       ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+                       NULL, &auth_iv, &digest);
+       if (unlikely(ofs.raw == UINT64_MAX)) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return -EINVAL;
+       }
+
+       total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+                       in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+       if (unlikely(total_len < 0)) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return -EINVAL;
+       }
+
+       enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+                       ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+                       &auth_iv, NULL, &digest);
+#endif
+
+       return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev, void *session)
+{
+       struct qat_sym_session *ctx = session;
+       enum rte_proc_type_t proc_type = rte_eal_process_type();
+       int ret;
+
+       ret = qat_sym_crypto_set_session_gen1(cdev, session);
+       /* special single pass build request for GEN3 */
+       if (ctx->is_single_pass)
+               ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+       else if (ctx->is_single_pass_gmac)
+               ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+       if (ret == -ENOTSUP) {
+               /* GEN1 returns -ENOTSUP for mixed algorithms it cannot
+                * handle; GEN3 can handle them, so apply the fix-ups here.
+                */
+               if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+                               ctx->qat_cipher_alg !=
+                               ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx,
+                               1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+               } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+                               ctx->qat_cipher_alg !=
+                               ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx,
+                               1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+               } else if ((ctx->aes_cmac ||
+                               ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+                               (ctx->qat_cipher_alg ==
+                               ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+                               ctx->qat_cipher_alg ==
+                               ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+               }
+
+               ret = 0;
+       }
+
+       return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
        qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
                        qat_sym_crypto_cap_get_gen3;
        qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
                        qat_sym_crypto_feature_flags_get_gen1;
+       qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+                       qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
        qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
                        qat_sym_create_security_gen1;
@@ -161,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
        qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
        qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
        qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+       qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
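
Worth noting: ctx->build_request[] is indexed by rte_eal_process_type()
because a function pointer installed by one DPDK process is not valid in
another process's address space, so each process installs its own pointer
when it sets up the session. A reduced model of that idea, with a
two-entry process enum standing in for DPDK's rte_proc_type_t:

#include <stdio.h>

enum proc_type { PROC_PRIMARY, PROC_SECONDARY, PROC_TYPES };

typedef int (*build_request_t)(void *op, unsigned char *out_msg);

struct session {
	/* One slot per process type, as in struct qat_sym_session. */
	build_request_t build_request[PROC_TYPES];
};

static int build_aead(void *op, unsigned char *out_msg)
{
	(void)op; (void)out_msg;
	return 0;
}

/* Each process registers the pointer valid in its own address space. */
static void set_session(struct session *s, enum proc_type self)
{
	s->build_request[self] = build_aead;
}

int main(void)
{
	struct session s = { { 0 } };
	enum proc_type self = PROC_PRIMARY; /* rte_eal_process_type() stand-in */
	unsigned char msg[64];

	set_session(&s, self);
	return s.build_request[self](NULL, msg);
}
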
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c0..7642a87 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,11 +103,133 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
        return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+       struct icp_qat_fw_la_bulk_req *req,
+       struct rte_crypto_va_iova_ptr *iv,
+       struct rte_crypto_va_iova_ptr *digest,
+       struct rte_crypto_va_iova_ptr *aad,
+       union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+       if (ctx->is_single_pass && ctx->is_ucs) {
+               struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+                       (void *)&req->serv_specif_rqpars;
+               struct icp_qat_fw_la_cipher_req_params *cipher_param =
+                       (void *)&req->serv_specif_rqpars;
+
+               /* QAT GEN4 uses single pass to treat AEAD as cipher
+                * operation
+                */
+               qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+                               req);
+               cipher_param->cipher_offset = ofs.ofs.cipher.head;
+               cipher_param->cipher_length = data_len -
+                               ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+               cipher_param_20->spc_aad_addr = aad->iova;
+               cipher_param_20->spc_auth_res_addr = digest->iova;
+
+               return;
+       }
+
+       enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+               uint8_t *out_msg, void *op_cookie)
+{
+       register struct icp_qat_fw_la_bulk_req *qat_req;
+       struct rte_crypto_op *op = in_op;
+       struct qat_sym_op_cookie *cookie = op_cookie;
+       struct rte_crypto_sgl in_sgl, out_sgl;
+       struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+                       out_vec[QAT_SYM_SGL_MAX_NUMBER];
+       struct rte_crypto_va_iova_ptr cipher_iv;
+       struct rte_crypto_va_iova_ptr aad;
+       struct rte_crypto_va_iova_ptr digest;
+       union rte_crypto_sym_ofs ofs;
+       int32_t total_len;
+
+       in_sgl.vec = in_vec;
+       out_sgl.vec = out_vec;
+
+       qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+       rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+       ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+                       &cipher_iv, &aad, &digest);
+       if (unlikely(ofs.raw == UINT64_MAX)) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return -EINVAL;
+       }
+
+       total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+                       in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+       if (unlikely(total_len < 0)) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               return -EINVAL;
+       }
+
+       enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+               total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+                       NULL, &aad, &digest);
+#endif
+
+       return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+       struct qat_sym_session *ctx = session;
+       enum rte_proc_type_t proc_type = rte_eal_process_type();
+       int ret;
+
+       ret = qat_sym_crypto_set_session_gen1(cdev, session);
+       /* special single pass build request for GEN4 */
+       if (ctx->is_single_pass && ctx->is_ucs)
+               ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+       if (ret == -ENOTSUP) {
+               /* GEN1 returns -ENOTSUP for mixed algorithms it cannot
+                * handle; GEN4 can handle them, so apply the fix-ups here.
+                */
+               if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+                               ctx->qat_cipher_alg !=
+                               ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx,
+                               1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+               } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+                               ctx->qat_cipher_alg !=
+                               ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx,
+                               1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+               } else if ((ctx->aes_cmac ||
+                               ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+                               (ctx->qat_cipher_alg ==
+                               ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+                               ctx->qat_cipher_alg ==
+                               ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+                       qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+               }
+
+               ret = 0;
+       }
+
+       return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
        qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
        qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
                        qat_sym_crypto_cap_get_gen4;
+       qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+                       qat_sym_crypto_set_session_gen4;
        qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
                        qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
        qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
        qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
        qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+       qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
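
In the GEN4 single-pass path, two parameter views
(icp_qat_fw_la_cipher_req_params and the UCS
icp_qat_fw_la_cipher_20_req_params) are overlaid on the same
serv_specif_rqpars bytes: common fields are written through one view and
the UCS-specific SPC fields through the other. A small illustration of
that overlay technique using a union (the structs here are invented for
the example; the driver does the same thing with pointer casts):

#include <stdio.h>
#include <stdint.h>

struct params_v1 {		/* base layout */
	uint32_t cipher_offset;
	uint32_t cipher_length;
	uint64_t reserved[2];
};

struct params_v2 {		/* extended layout over the same storage */
	uint32_t cipher_offset;
	uint32_t cipher_length;
	uint64_t spc_aad_addr;
	uint64_t spc_auth_res_addr;
};

union req_params {
	struct params_v1 v1;
	struct params_v2 v2;
};

int main(void)
{
	union req_params p = { .v1 = { 0 } };

	/* Common fields through the v1 view... */
	p.v1.cipher_offset = 16;
	p.v1.cipher_length = 1024;
	/* ...extended fields through the v2 view of the same bytes. */
	p.v2.spc_aad_addr = 0x1000;
	p.v2.spc_auth_res_addr = 0x2000;

	printf("len=%u aad=0x%llx\n", (unsigned)p.v2.cipher_length,
			(unsigned long long)p.v2.spc_aad_addr);
	return 0;
}
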
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e..96cdb97 100644
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 2dd7475..ff176dd 100644
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+       struct qat_sym_session *ctx = session;
+       qat_sym_build_request_t build_request = NULL;
+       enum rte_proc_type_t proc_type = rte_eal_process_type();
+       int handle_mixed = 0;
+
+       if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+                       ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+                       !ctx->is_gmac) {
+               /* AES-GCM or AES-CCM */
+               if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+                       ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+                       (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+                       && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+                       && ctx->qat_hash_alg ==
+                                       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+                       /* do_aead = 1; */
+                       build_request = qat_sym_build_op_aead_gen1;
+               } else {
+                       /* do_auth = 1; do_cipher = 1; */
+                       build_request = qat_sym_build_op_chain_gen1;
+                       handle_mixed = 1;
+               }
+       } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+               /* do_auth = 1; do_cipher = 0;*/
+               build_request = qat_sym_build_op_auth_gen1;
+       } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+               /* do_auth = 0; do_cipher = 1; */
+               build_request = qat_sym_build_op_cipher_gen1;
+       }
+
+       if (build_request)
+               ctx->build_request[proc_type] = build_request;
+       else
+               return -EINVAL;
+
+       /* no more work if not mixed op */
+       if (!handle_mixed)
+               return 0;
+
+       /* Check for algorithm combinations not supported when mixed */
+       if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+                       ctx->qat_cipher_alg !=
+                       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+               return -ENOTSUP;
+       } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+                       ctx->qat_cipher_alg !=
+                       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+               return -ENOTSUP;
+       } else if ((ctx->aes_cmac ||
+                       ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+                       (ctx->qat_cipher_alg ==
+                       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+                       ctx->qat_cipher_alg ==
+                       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+               return -ENOTSUP;
+       }
+
+       return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
        qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
        qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
                        qat_sym_crypto_cap_get_gen1;
+       qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+                       qat_sym_crypto_set_session_gen1;
        qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
                        qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
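
The function above is the base half of a refuse-then-upgrade protocol:
qat_sym_crypto_set_session_gen1() deliberately returns -ENOTSUP for mixed
hash+cipher combinations, and each later generation's set_session catches
that code, applies its own fix-ups, and converts it to success when it
can. A compact model of the protocol, with the session and capability
details reduced to flags:

#include <errno.h>
#include <stdio.h>

struct sess { int mixed; int fixed_up; };

static int set_session_gen1(struct sess *s)
{
	/* Base generation: selects build paths, refuses mixed algos. */
	return s->mixed ? -ENOTSUP : 0;
}

static int set_session_gen2(struct sess *s, int dev_supports_mixed)
{
	int ret = set_session_gen1(s);

	if (ret == -ENOTSUP) {
		if (!dev_supports_mixed)
			return -ENOTSUP;	/* this device cannot help either */
		s->fixed_up = 1;		/* apply ext-hash-flag fix-ups */
		ret = 0;
	}
	return ret;
}

int main(void)
{
	struct sess s = { .mixed = 1, .fixed_up = 0 };
	int ret = set_session_gen2(&s, 1);

	printf("gen2 ret=%d fixed=%d\n", ret, s.fixed_up);
	return 0;
}
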
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b..5ca76fc 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
        get_feature_flags_t get_feature_flags;
        get_capabilities_info_t get_capabilities;
        struct rte_cryptodev_ops *cryptodev_ops;
+       set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
        create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
                struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8..83bf55c 100644
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
                struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-               void *op_cookie, enum qat_device_gen qat_dev_gen)
+               void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
        int ret = 0;
        struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                return -EINVAL;
        }
 
-       if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-               QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-               op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-               return -EINVAL;
-       }
-
        qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
        rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
        qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index 28a2626..63ab9a5 100644
@@ -20,9 +20,9 @@
 
 #define MIXED_CRYPTO_MIN_FW_VER 0x04090000
 
-uint8_t qat_sym_driver_id;
+extern uint8_t qat_sym_driver_id;
 
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
 void
 qat_sym_init_op_cookie(void *op_cookie)
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475c..3a88009 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>       /* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
        return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-               uint8_t hash_flag)
-{
-       struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-       struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-                       (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-                       session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-       /* Set the Use Extended Protocol Flags bit in LW 1 */
-       QAT_FIELD_SET(header->comn_req_flags,
-                       QAT_COMN_EXT_FLAGS_USED,
-                       QAT_COMN_EXT_FLAGS_BITPOS,
-                       QAT_COMN_EXT_FLAGS_MASK);
-
-       /* Set Hash Flags in LW 28 */
-       cd_ctrl->hash_flags |= hash_flag;
-
-       /* Set proto flags in LW 1 */
-       switch (session->qat_cipher_alg) {
-       case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-                               ICP_QAT_FW_LA_SNOW_3G_PROTO);
-               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-                               header->serv_specif_flags, 0);
-               break;
-       case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-                               ICP_QAT_FW_LA_NO_PROTO);
-               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-                               header->serv_specif_flags,
-                               ICP_QAT_FW_LA_ZUC_3G_PROTO);
-               break;
-       default:
-               ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-                               ICP_QAT_FW_LA_NO_PROTO);
-               ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-                               header->serv_specif_flags, 0);
-               break;
-       }
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-               struct qat_sym_session *session)
-{
-       const struct qat_cryptodev_private *qat_private =
-                       dev->data->dev_private;
-       enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-                       QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-       if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-                       session->qat_cipher_alg !=
-                       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-               session->min_qat_dev_gen = min_dev_gen;
-               qat_sym_session_set_ext_hash_flags(session,
-                       1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-       } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-                       session->qat_cipher_alg !=
-                       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-               session->min_qat_dev_gen = min_dev_gen;
-               qat_sym_session_set_ext_hash_flags(session,
-                       1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-       } else if ((session->aes_cmac ||
-                       session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-                       (session->qat_cipher_alg ==
-                       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-                       session->qat_cipher_alg ==
-                       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-               session->min_qat_dev_gen = min_dev_gen;
-               qat_sym_session_set_ext_hash_flags(session, 0);
-       }
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
        enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
        int ret;
        int qat_cmd_id;
-       int handle_mixed = 0;
 
        /* Verify the session physical address is known */
        rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
        session->cd_paddr = session_paddr +
                        offsetof(struct qat_sym_session, cd);
 
-       session->min_qat_dev_gen = QAT_GEN1;
+       session->dev_id = internals->dev_id;
        session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
        session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                                        xform, session);
                        if (ret < 0)
                                return ret;
-                       handle_mixed = 1;
                }
                break;
        case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                                        xform, session);
                        if (ret < 0)
                                return ret;
-                       handle_mixed = 1;
                }
                break;
        case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                return -ENOTSUP;
        }
        qat_sym_session_finalize(session);
-       if (handle_mixed) {
-               /* Special handling of mixed hash+cipher algorithms */
-               qat_sym_session_handle_mixed(dev, session);
-       }
 
-       return 0;
+       return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+                       (void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
        session->is_single_pass = 1;
        session->is_auth = 1;
-       session->min_qat_dev_gen = QAT_GEN3;
        session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
        /* Chacha-Poly is special case that use QAT CTR mode */
-       if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+       if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
                session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-       } else {
+       else
                session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-       }
+
        session->cipher_iv.offset = aead_xform->iv.offset;
        session->cipher_iv.length = aead_xform->iv.length;
        session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
        return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-                       uint8_t *data_in,
-                       uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+               uint8_t *data_in, uint8_t *data_out)
 {
        int digest_size;
        uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
                cipher_cd_ctrl->cipher_state_sz =
                        ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
                cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-               cdesc->min_qat_dev_gen = QAT_GEN2;
        } else {
                total_key_size = cipherkeylen;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
                memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
                cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
                auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-               cdesc->min_qat_dev_gen = QAT_GEN2;
 
                break;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
        session->cd_paddr = session_paddr +
                        offsetof(struct qat_sym_session, cd);
 
-       session->min_qat_dev_gen = QAT_GEN1;
-
        /* Get requested QAT command id - should be cipher */
        qat_cmd_id = qat_get_cmd_id(xform);
        if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
        void *sess_private_data;
        struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+       struct qat_cryptodev_private *internals = cdev->data->dev_private;
+       enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+       struct qat_sym_session *sym_session = NULL;
        int ret;
 
        if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
        }
 
        set_sec_session_private_data(sess, sess_private_data);
+       sym_session = (struct qat_sym_session *)sess_private_data;
+       sym_session->dev_id = internals->dev_id;
 
-       return ret;
+       return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+                       sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7..01908ab 100644
@@ -105,7 +105,7 @@ struct qat_sym_session {
        uint16_t auth_key_length;
        uint16_t digest_length;
        rte_spinlock_t lock;    /* protects this struct */
-       enum qat_device_gen min_qat_dev_gen;
+       uint16_t dev_id;
        uint8_t aes_cmac;
        uint8_t is_single_pass;
        uint8_t is_single_pass_gmac;
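
Closing note: with min_qat_dev_gen gone, a session prepared for one QAT
generation must not be enqueued on a device of another generation; the
session now records only the dev_id it was configured for. Under the
cryptodev API of this era, that simply means initializing a session
against every device it will run on, as in this hedged sketch (API names
per the DPDK 22.03-era symmetric session API; mempool setup and error
handling reduced):

#include <rte_cryptodev.h>

/* Hypothetical helper: one session initialized per device, never shared
 * across QAT generations. */
static struct rte_cryptodev_sym_session *
session_for_dev(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_mp, struct rte_mempool *priv_mp)
{
	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

	if (sess == NULL)
		return NULL;
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, priv_mp) != 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}
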