crypto/qat: do not use cryptodev driver
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index b9baf36..8b7b2fa 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -60,6 +60,7 @@
 #include <rte_spinlock.h>
 #include <rte_hexdump.h>
 #include <rte_crypto_sym.h>
+#include <rte_cryptodev_pci.h>
 #include <openssl/evp.h>
 
 #include "qat_logs.h"
@@ -757,7 +758,7 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
                        tmp_qp->stats.enqueue_err_count++;
                        /*
                         * This message cannot be enqueued,
-                        * decrease number of ops that wasnt sent
+                        * decrease number of ops that weren't sent
                         */
                        rte_atomic16_sub(&tmp_qp->inflights16,
                                        nb_ops_possible - nb_ops_sent);
@@ -971,17 +972,24 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
                }
 
                /* copy IV into request if it fits */
-               if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
-                               sizeof(cipher_param->u.cipher_IV_array))) {
-                       rte_memcpy(cipher_param->u.cipher_IV_array,
-                                       op->sym->cipher.iv.data,
-                                       op->sym->cipher.iv.length);
-               } else {
-                       ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-                                       qat_req->comn_hdr.serv_specif_flags,
-                                       ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-                       cipher_param->u.s.cipher_IV_ptr =
-                                       op->sym->cipher.iv.phys_addr;
+               /*
+                * If the IV length is zero, do not copy anything,
+                * but still use the IV embedded in the request
+                * descriptor.
+                */
+               if (op->sym->cipher.iv.length) {
+                       if (op->sym->cipher.iv.length <=
+                                       sizeof(cipher_param->u.cipher_IV_array)) {
+                               rte_memcpy(cipher_param->u.cipher_IV_array,
+                                               op->sym->cipher.iv.data,
+                                               op->sym->cipher.iv.length);
+                       } else {
+                               ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+                                               qat_req->comn_hdr.serv_specif_flags,
+                                               ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+                               cipher_param->u.s.cipher_IV_ptr =
+                                               op->sym->cipher.iv.phys_addr;
+                       }
                }
                min_ofs = cipher_ofs;
        }
@@ -1014,6 +1022,12 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
                                }
                        }
 
+               } else if (ctx->qat_hash_alg ==
+                                       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+                               ctx->qat_hash_alg ==
+                                       ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+                       auth_ofs = op->sym->cipher.data.offset;
+                       auth_len = op->sym->cipher.data.length;
                } else {
                        auth_ofs = op->sym->auth.data.offset;
                        auth_len = op->sym->auth.data.length;
@@ -1227,8 +1241,8 @@ int qat_dev_close(struct rte_cryptodev *dev)
        return 0;
 }
 
-void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
-                               struct rte_cryptodev_info *info)
+void qat_dev_info_get(struct rte_cryptodev *dev,
+                       struct rte_cryptodev_info *info)
 {
        struct qat_pmd_private *internals = dev->data->dev_private;
 
@@ -1241,6 +1255,7 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
                info->capabilities = internals->qat_dev_capabilities;
                info->sym.max_nb_sessions = internals->max_nb_sessions;
                info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+               info->pci_dev = RTE_DEV_TO_PCI(dev->device);
        }
 }
 
@@ -1262,9 +1277,9 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
                }
 
                stats->enqueued_count += qp[i]->stats.enqueued_count;
-               stats->dequeued_count += qp[i]->stats.enqueued_count;
+               stats->dequeued_count += qp[i]->stats.dequeued_count;
                stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
-               stats->dequeue_err_count += qp[i]->stats.enqueue_err_count;
+               stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
        }
 }
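
Below is a minimal sketch, not part of the patch above, of how an application might observe the dev_info_get and stats_get changes through the public cryptodev API. It assumes a QAT device already probed as dev_id 0 and a DPDK release of the same era as this patch (where struct rte_cryptodev_info still carries a pci_dev pointer); the helper name dump_qat_dev() is illustrative only.

/*
 * Illustrative sketch only, not part of the patch: query device info
 * and per-device stats for an already-probed QAT device (dev_id 0).
 */
#include <stdio.h>
#include <inttypes.h>
#include <rte_cryptodev.h>

static void
dump_qat_dev(uint8_t dev_id)
{
	struct rte_cryptodev_info info;
	struct rte_cryptodev_stats stats;

	/* qat_dev_info_get() now also fills info.pci_dev from dev->device */
	rte_cryptodev_info_get(dev_id, &info);
	printf("driver: %s, max queue pairs: %u, PCI info %s\n",
	       info.driver_name, info.max_nb_queue_pairs,
	       info.pci_dev != NULL ? "present" : "absent");

	/* stats_get sums the per-queue-pair counters fixed above */
	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		printf("enq %" PRIu64 ", deq %" PRIu64
		       ", enq_err %" PRIu64 ", deq_err %" PRIu64 "\n",
		       stats.enqueued_count, stats.dequeued_count,
		       stats.enqueue_err_count, stats.dequeue_err_count);
}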