/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
+#include <rte_cryptodev_pci.h>
#include <openssl/evp.h>
#include "qat_logs.h"
#define BYTE_LENGTH 8
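/* check whether the device advertises support for this cipher algorithm */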
-static int __rte_unused
+static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
struct qat_pmd_private *internals) {
int i = 0;
return 0;
}
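/* check whether the device advertises support for this auth algorithm */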
-static int __rte_unused
+static int
qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
struct qat_pmd_private *internals) {
int i = 0;
return 0;
}
void *
-qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev __rte_unused,
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_session *session = session_private;
-
+ struct qat_pmd_private *internals = dev->data->dev_private;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
/* Get cipher xform from crypto xform chain */
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ PMD_DRV_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ goto error_out;
+ }
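+ /* EEA3 uses a 128-bit (16 byte) cipher key */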
+ if (qat_alg_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
+ goto error_out;
+ }
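+ /*
+ * Like SNOW 3G, ZUC is programmed on QAT with the raw
+ * (ECB) cipher mode flag; the EEA3 keystream is derived
+ * from the per-operation IV.
+ */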
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_AES_F8:
case RTE_CRYPTO_CIPHER_AES_XTS:
case RTE_CRYPTO_CIPHER_ARC4:
- case RTE_CRYPTO_CIPHER_ZUC_EEA3:
PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
cipher_xform->algo);
goto error_out;
}
struct qat_session *
-qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev __rte_unused,
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct qat_session *session_private)
{
struct qat_session *session = session_private;
struct rte_crypto_auth_xform *auth_xform = NULL;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct qat_pmd_private *internals = dev->data->dev_private;
auth_xform = qat_get_auth_xform(xform);
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_KASUMI_F9:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ PMD_DRV_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ goto error_out;
+ }
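+ /* EIA3 uses a 128-bit integrity key and produces a 32-bit MAC */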
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA256:
case RTE_CRYPTO_AUTH_SHA512:
case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
auth_xform->algo);
goto error_out;
iv = last_block - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sym_op->cipher.iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
iv = dst - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sym_op->cipher.iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "BPI: src before post-process:", last_block,
tmp_qp->stats.enqueue_err_count++;
/*
* This message cannot be enqueued,
- * decrease number of ops that wasnt sent
+ * decrease the number of ops that weren't sent
*/
rte_atomic16_sub(&tmp_qp->inflights16,
nb_ops_possible - nb_ops_sent);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
sizeof(struct icp_qat_fw_comn_resp));
#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
uint8_t do_sgl = 0;
+ uint8_t *iv_ptr;
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
return -EINVAL;
}
#endif
- if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests, op (%p) is sessionless.", op);
return -EINVAL;
if (ctx->qat_cipher_alg ==
ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
- ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
if (unlikely(
(cipher_param->cipher_length % BYTE_LENGTH != 0)
|| (cipher_param->cipher_offset
% BYTE_LENGTH != 0))) {
PMD_DRV_LOG(ERR,
- "SNOW3G/KASUMI in QAT PMD only supports byte aligned values");
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
}
cipher_ofs = op->sym->cipher.data.offset;
}
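+ /* the IV is carried at an offset within the crypto op itself */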
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ op->sym->cipher.iv.offset);
/* copy IV into request if it fits */
- if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- op->sym->cipher.iv.phys_addr;
+ /*
+ * If the IV length is zero, nothing is copied, but the
+ * request still uses the IV field embedded in the
+ * descriptor.
+ */
+ if (op->sym->cipher.iv.length) {
+ if (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ iv_ptr,
+ op->sym->cipher.iv.length);
+ } else {
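+ /*
+ * IV too large for the descriptor field:
+ * pass its physical address to the firmware
+ */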
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ op->sym->cipher.iv.offset);
+ }
}
min_ofs = cipher_ofs;
}
if (do_auth) {
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
PMD_DRV_LOG(ERR,
- "For SNOW3G/KASUMI, QAT PMD only supports byte aligned values");
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
}
}
}
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
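+ /*
+ * For Galois (GCM) hashes, the region to authenticate
+ * is the ciphered region; the AAD is carried separately
+ * in op->sym->auth.aad.
+ */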
+ auth_ofs = op->sym->cipher.data.offset;
+ auth_len = op->sym->cipher.data.length;
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
if (op->sym->cipher.iv.length == 12) {
/*
- * For GCM a 12 bit IV is allowed,
+ * For GCM a 12 byte IV is allowed,
* but we need to inform the f/w
*/
ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
rte_hexdump(stdout, "src_data:",
rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
rte_pktmbuf_data_len(op->sym->m_src));
- rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
- op->sym->auth.digest.length);
- rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
- op->sym->auth.aad.length);
+ if (do_cipher)
+ rte_hexdump(stdout, "iv:", iv_ptr,
+ op->sym->cipher.iv.length);
+
+ if (do_auth) {
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
+ }
#endif
return 0;
}
return 0;
}
-void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info)
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
{
struct qat_pmd_private *internals = dev->data->dev_private;
info->capabilities = internals->qat_dev_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
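+ /* expose the underlying PCI device to the application */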
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
}
}
}
stats->enqueued_count += qp[i]->stats.enqueued_count;
- stats->dequeued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
- stats->dequeue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
}
}