* Copyright(c) 2015-2018 Intel Corporation
*/
+#include <openssl/evp.h>
+
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
-#include <openssl/evp.h>
-
-#include "qat_logs.h"
-#include "qat_sym_session.h"
#include "qat_sym.h"
-#include "adf_transport_access_macros.h"
-
-#define BYTE_LENGTH 8
-/* bpi is only used for partial blocks of DES and AES
- * so AES block len can be assumed as max len for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-/** Encrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt the IV, then XOR this with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_encrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_encrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
- return -EINVAL;
-}
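/* For reference, a minimal standalone sketch of the ECB+XOR trick used
 * above (assumes OpenSSL libcrypto; the helper name and the fixed
 * 128-bit key size are illustrative, not part of this patch). For a
 * single block, CFB reduces to C = P ^ E_K(IV), so one ECB encryption
 * of the IV yields the whole keystream block:
 */
static int
cfb_one_block_via_ecb(const uint8_t key[16], const uint8_t iv[16],
		const uint8_t *src, uint8_t *dst, int srclen)
{
	uint8_t keystream[16];
	int outl, i;
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

	if (ctx == NULL)
		return -1;
	/* ECB-encrypt the IV once to produce the CFB keystream block */
	if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) <= 0 ||
			EVP_EncryptUpdate(ctx, keystream, &outl, iv, 16) <= 0) {
		EVP_CIPHER_CTX_free(ctx);
		return -1;
	}
	for (i = 0; i < srclen; i++)	/* srclen <= one block */
		dst[i] = src[i] ^ keystream[i];
	EVP_CIPHER_CTX_free(ctx);
	return 0;
}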
/** Decrypt a single partial block
* Depends on openssl libcrypto
return 0;
cipher_decrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
return -EINVAL;
}
-/** Creates a context in either AES or DES in ECB mode
- * Depends on openssl libcrypto
- */
-static inline uint32_t
-adf_modulo(uint32_t data, uint32_t shift);
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
static inline uint32_t
-qat_bpicipher_preprocess(struct qat_session *ctx,
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
struct rte_crypto_op *op)
{
int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
iv = rte_crypto_op_ctod_offset(op, uint8_t *,
ctx->cipher_iv.offset);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
- last_block_len);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+ last_block, last_block_len);
if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
- last_block_len);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
+ dst, last_block_len);
#endif
bpi_cipher_decrypt(last_block, dst, iv, block_len,
last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
- last_block_len);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+ last_block, last_block_len);
if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
- last_block_len);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+ dst, last_block_len);
#endif
}
return sym_op->cipher.data.length - last_block_len;
}
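/* The length returned above excludes the trailing partial block: the
 * hardware is asked to process whole blocks only, while the partial
 * block has already been handled by the software bpi_cipher_decrypt()
 * step, per the DOCSIS BPI handling of partial blocks.
 */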
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len > 0 &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
- /* Encrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset;
-
- last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = dst - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src before post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before post-process:",
- dst, last_block_len);
-#endif
- bpi_cipher_encrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src after post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after post-process:", dst,
- last_block_len);
-#endif
- }
- return sym_op->cipher.data.length - last_block_len;
-}
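/* Worked example of the IV selection in qat_bpicipher_postprocess()
 * above: with AES (16-byte blocks) and a 24-byte cipher region, the
 * trailing 8 bytes form the partial block and the preceding 16
 * ciphertext bytes serve as its IV; an 8-byte region is a runt (no
 * full block precedes it), so the operation's own cipher IV is used.
 */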
-
-static inline void
-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
- WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, q->tail);
- q->nb_pending_requests = 0;
- q->csr_tail = q->tail;
-}
-
-uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- register struct qat_queue *queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_ops_sent = 0;
- register struct rte_crypto_op **cur_op = ops;
- register int ret;
- uint16_t nb_ops_possible = nb_ops;
- register uint8_t *base_addr;
- register uint32_t tail;
- int overflow;
-
- if (unlikely(nb_ops == 0))
- return 0;
-
- /* read params used a lot in main loop into registers */
- queue = &(tmp_qp->tx_q);
- base_addr = (uint8_t *)queue->base_addr;
- tail = queue->tail;
-
- /* Find how many can actually fit on the ring */
- tmp_qp->inflights16 += nb_ops;
- overflow = tmp_qp->inflights16 - queue->max_inflights;
- if (overflow > 0) {
- tmp_qp->inflights16 -= overflow;
- nb_ops_possible = nb_ops - overflow;
- if (nb_ops_possible == 0)
- return 0;
- }
-
- while (nb_ops_sent != nb_ops_possible) {
- ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
- tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
- if (ret != 0) {
- tmp_qp->stats.enqueue_err_count++;
- /*
- * This message cannot be enqueued,
- * decrease number of ops that wasn't sent
- */
- tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- }
-
- tail = adf_modulo(tail + queue->msg_size, queue->modulo);
- nb_ops_sent++;
- cur_op++;
- }
-kick_tail:
- queue->tail = tail;
- tmp_qp->stats.enqueued_count += nb_ops_sent;
- queue->nb_pending_requests += nb_ops_sent;
- if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
- queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
- txq_write_tail(tmp_qp, queue);
- }
- return nb_ops_sent;
-}
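/* Note on the tail-update policy above: the tail CSR write (the
 * doorbell in txq_write_tail()) is an expensive MMIO access, so it is
 * coalesced. The tail is pushed either once enough requests are
 * pending (QAT_CSR_TAIL_WRITE_THRESH) or when few operations are in
 * flight (below QAT_CSR_TAIL_FORCE_WRITE_THRESH), so work is never
 * left unannounced on a nearly idle ring. The dequeue path below
 * applies the same rule when draining responses.
 */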
-
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
-{
- uint32_t old_head, new_head;
- uint32_t max_head;
-
- old_head = q->csr_head;
- new_head = q->head;
- max_head = qp->nb_descriptors * q->msg_size;
-
- /* write out free descriptors */
- void *cur_desc = (uint8_t *)q->base_addr + old_head;
-
- if (new_head < old_head) {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
- memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
- } else {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
- }
- q->nb_processed_responses = 0;
- q->csr_head = new_head;
-
- /* write current head to CSR */
- WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, new_head);
-}
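/* rxq_free_desc() above returns descriptors by re-marking them with
 * ADF_RING_EMPTY_SIG_BYTE, which is what the dequeue loop below tests
 * for to detect an empty slot; the two-memset branch covers the case
 * where the head has wrapped past the end of the ring since the last
 * CSR head write.
 */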
-
-uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct qat_queue *rx_queue, *tx_queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- uint32_t msg_counter = 0;
- struct rte_crypto_op *rx_op;
- struct icp_qat_fw_comn_resp *resp_msg;
- uint32_t head;
-
- rx_queue = &(tmp_qp->rx_q);
- tx_queue = &(tmp_qp->tx_q);
- head = rx_queue->head;
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
-
- while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_ops) {
- rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque_data);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
- sizeof(struct icp_qat_fw_comn_resp));
-#endif
- if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
- ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
- resp_msg->comn_hdr.comn_status)) {
- rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- struct qat_session *sess = (struct qat_session *)
- get_session_private_data(
- rx_op->sym->session,
- cryptodev_qat_driver_id);
-
- if (sess->bpi_ctx)
- qat_bpicipher_postprocess(sess, rx_op);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- }
-
- head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
- *ops = rx_op;
- ops++;
- msg_counter++;
- }
- if (msg_counter > 0) {
- rx_queue->head = head;
- tmp_qp->stats.dequeued_count += msg_counter;
- rx_queue->nb_processed_responses += msg_counter;
- tmp_qp->inflights16 -= msg_counter;
-
- if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
- rxq_free_desc(tmp_qp, rx_queue);
- }
- /* also check if tail needs to be advanced */
- if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
- tx_queue->tail != tx_queue->csr_tail) {
- txq_write_tail(tmp_qp, tx_queue);
- }
- return msg_counter;
-}
-
-static inline int
-qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
- struct qat_alg_buf_list *list, uint32_t data_len)
-{
- int nr = 1;
-
- uint32_t buf_len = rte_pktmbuf_iova(buf) -
- buff_start + rte_pktmbuf_data_len(buf);
-
- list->bufers[0].addr = buff_start;
- list->bufers[0].resrvd = 0;
- list->bufers[0].len = buf_len;
-
- if (data_len <= buf_len) {
- list->num_bufs = nr;
- list->bufers[0].len = data_len;
- return 0;
- }
-
- buf = buf->next;
- while (buf) {
- if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
- PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
- " entry(%u)",
- QAT_SGL_MAX_NUMBER);
- return -EINVAL;
- }
-
- list->bufers[nr].len = rte_pktmbuf_data_len(buf);
- list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_iova(buf);
-
- buf_len += list->bufers[nr].len;
- buf = buf->next;
-
- if (buf_len > data_len) {
- list->bufers[nr].len -=
- buf_len - data_len;
- buf = NULL;
- }
- ++nr;
- }
- list->num_bufs = nr;
-
- return 0;
-}
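/* The qat_sgl_fill_array() used by the reworked call sites below is
 * the shared variant (moved out of this file, presumably into the
 * common QAT code): it takes the buffer start as an offset relative to
 * the mbuf's IOVA, and the maximum number of SGL entries
 * (QAT_SYM_SGL_MAX_NUMBER) as an explicit parameter rather than the
 * hard-coded QAT_SGL_MAX_NUMBER above.
 */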
-
static inline void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
struct icp_qat_fw_la_cipher_req_params *cipher_param,
iv_length);
}
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen)
{
int ret = 0;
- struct qat_session *ctx;
+ struct qat_sym_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
uint8_t do_sgl = 0;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *)op_cookie;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
"operation requests, op (%p) is not a "
"symmetric operation.", op);
return -EINVAL;
}
-#endif
+
if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
+ QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
" requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- ctx = (struct qat_session *)get_session_private_data(
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
op->sym->session, cryptodev_qat_driver_id);
if (unlikely(ctx == NULL)) {
- PMD_DRV_LOG(ERR, "Session was not created for this device");
+ QAT_DP_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
- PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
+ if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
+ QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
return -EINVAL;
}
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
if (unlikely(
- (cipher_param->cipher_length % BYTE_LENGTH != 0)
- || (cipher_param->cipher_offset
- % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
- if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
- || (auth_param->auth_len % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
+ if (unlikely(
+ (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_SGL);
- ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &qat_op_cookie->qat_sgl_list_src,
- qat_req->comn_mid.src_length);
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ ret = qat_sgl_fill_array(op->sym->m_src,
+ (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
return ret;
}
if (likely(op->sym->m_dst == NULL))
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
+ cookie->qat_sgl_src_phys_addr;
else {
ret = qat_sgl_fill_array(op->sym->m_dst,
- dst_buf_start,
- &qat_op_cookie->qat_sgl_list_dst,
- qat_req->comn_mid.dst_length);
-
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot "
- "fill sgl array");
+ (int64_t)(dst_buf_start -
+ rte_pktmbuf_iova(op->sym->m_dst)),
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
return ret;
}
qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
+ cookie->qat_sgl_src_phys_addr;
qat_req->comn_mid.dest_data_addr =
- qat_op_cookie->qat_sgl_dst_phys_addr;
+ cookie->qat_sgl_dst_phys_addr;
}
} else {
qat_req->comn_mid.src_data_addr = src_buf_start;
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "qat_req:", qat_req,
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
- rte_hexdump(stdout, "src_data:",
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
rte_pktmbuf_data_len(op->sym->m_src));
if (do_cipher) {
uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
uint8_t *,
ctx->cipher_iv.offset);
- rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
ctx->cipher_iv.length);
}
uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
uint8_t *,
ctx->auth_iv.offset);
- rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
ctx->auth_iv.length);
}
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
ctx->digest_length);
}
if (do_aead) {
- rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
ctx->digest_length);
- rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
ctx->aad_len);
}
#endif
return 0;
}
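/* qat_sym_build_request() takes (void *, uint8_t *, void *,
 * enum qat_device_gen) rather than crypto-specific types so that the
 * generic queue-pair enqueue path can invoke it as a service-agnostic
 * request-builder callback; the device generation argument is all it
 * needs for the min_qat_dev_gen capability check above.
 */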
-
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
-{
- uint32_t div = data >> shift;
- uint32_t mult = div << shift;
-
- return data - mult;
-}
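/* adf_modulo() reduces modulo a power of two without a divide:
 * adf_modulo(data, shift) == data & ((1u << shift) - 1). E.g. for a
 * 4096-byte ring (shift == 12), adf_modulo(4096, 12)
 * = 4096 - ((4096 >> 12) << 12) = 0, wrapping back to the ring base.
 */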
-
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats)
-{
- int i;
- struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-
- PMD_INIT_FUNC_TRACE();
- if (stats == NULL) {
- PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
- return;
- }
- for (i = 0; i < dev->data->nb_queue_pairs; i++) {
- if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
- continue;
- }
-
- stats->enqueued_count += qp[i]->stats.enqueued_count;
- stats->dequeued_count += qp[i]->stats.dequeued_count;
- stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
- stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
- }
-}
-
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
-{
- int i;
- struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-
- PMD_INIT_FUNC_TRACE();
- for (i = 0; i < dev->data->nb_queue_pairs; i++)
- memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
- PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
-}