crypto/qat: move common qat files to common dir
author Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Mon, 2 Jul 2018 09:39:13 +0000 (11:39 +0200)
committer Pablo de Lara <pablo.de.lara.guarch@intel.com>
Tue, 10 Jul 2018 22:55:30 +0000 (00:55 +0200)
-  moved common QAT files to the common/qat directory
-  changed common/qat/Makefile, common/qat/meson.build,
   drivers/Makefile and crypto/Makefile so that the new
   file locations can be used
-  added a README file to crypto/qat to clarify where
   the build is done from
-  updated the MAINTAINERS file

Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>
35 files changed:
MAINTAINERS
drivers/Makefile
drivers/common/meson.build
drivers/common/qat/Makefile [new file with mode: 0644]
drivers/common/qat/meson.build [new file with mode: 0644]
drivers/common/qat/qat_adf/adf_transport_access_macros.h [new file with mode: 0644]
drivers/common/qat/qat_adf/icp_qat_fw.h [new file with mode: 0644]
drivers/common/qat/qat_adf/icp_qat_fw_la.h [new file with mode: 0644]
drivers/common/qat/qat_adf/icp_qat_hw.h [new file with mode: 0644]
drivers/common/qat/qat_common.c [new file with mode: 0644]
drivers/common/qat/qat_common.h [new file with mode: 0644]
drivers/common/qat/qat_device.c [new file with mode: 0644]
drivers/common/qat/qat_device.h [new file with mode: 0644]
drivers/common/qat/qat_logs.c [new file with mode: 0644]
drivers/common/qat/qat_logs.h [new file with mode: 0644]
drivers/common/qat/qat_qp.c [new file with mode: 0644]
drivers/common/qat/qat_qp.h [new file with mode: 0644]
drivers/crypto/Makefile
drivers/crypto/qat/Makefile [deleted file]
drivers/crypto/qat/README [new file with mode: 0644]
drivers/crypto/qat/meson.build
drivers/crypto/qat/qat_adf/adf_transport_access_macros.h [deleted file]
drivers/crypto/qat/qat_adf/icp_qat_fw.h [deleted file]
drivers/crypto/qat/qat_adf/icp_qat_fw_la.h [deleted file]
drivers/crypto/qat/qat_adf/icp_qat_hw.h [deleted file]
drivers/crypto/qat/qat_common.c [deleted file]
drivers/crypto/qat/qat_common.h [deleted file]
drivers/crypto/qat/qat_device.c [deleted file]
drivers/crypto/qat/qat_device.h [deleted file]
drivers/crypto/qat/qat_logs.c [deleted file]
drivers/crypto/qat/qat_logs.h [deleted file]
drivers/crypto/qat/qat_qp.c [deleted file]
drivers/crypto/qat/qat_qp.h [deleted file]
drivers/crypto/qat/qat_sym.h
drivers/crypto/qat/qat_sym_pmd.h

index dabb12d..8050b5d 100644 (file)
@@ -777,6 +777,7 @@ M: John Griffin <john.griffin@intel.com>
 M: Fiona Trahe <fiona.trahe@intel.com>
 M: Deepak Kumar Jain <deepak.k.jain@intel.com>
 F: drivers/crypto/qat/
+F: drivers/common/qat/
 F: doc/guides/cryptodevs/qat.rst
 F: doc/guides/cryptodevs/features/qat.ini
 
index c88638c..7566076 100644 (file)
@@ -13,6 +13,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += baseband
 DEPDIRS-baseband := common bus mempool
 DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
 DEPDIRS-crypto := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += common/qat
+DEPDIRS-common/qat := bus mempool
 DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += compress
 DEPDIRS-compress := bus mempool
 DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
index 5f6341b..d7b7d8c 100644 (file)
@@ -2,6 +2,6 @@
 # Copyright(c) 2018 Cavium, Inc
 
 std_deps = ['eal']
-drivers = ['octeontx']
+drivers = ['octeontx', 'qat']
 config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
 driver_name_fmt = 'rte_common_@0@'
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
new file mode 100644 (file)
index 0000000..02e83f9
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_qat.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+
+# build directories
+QAT_CRYPTO_DIR := $(RTE_SDK)/drivers/crypto/qat
+VPATH=$(QAT_CRYPTO_DIR)
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I$(QAT_CRYPTO_DIR)
+
+# library common source files
+SRCS-y += qat_device.c
+SRCS-y += qat_common.c
+SRCS-y += qat_logs.c
+SRCS-y += qat_qp.c
+
+# library symmetric crypto source files
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+       LDLIBS += -lrte_cryptodev
+       LDLIBS += -lcrypto
+       CFLAGS += -DBUILD_QAT_SYM
+       SRCS-y += qat_sym.c
+       SRCS-y += qat_sym_session.c
+       SRCS-y += qat_sym_pmd.c
+endif
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# export include files
+SYMLINK-y-include +=
+
+# versioning export map
+EXPORT_MAP := ../../crypto/qat/rte_pmd_qat_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
new file mode 100644 (file)
index 0000000..80b6b25
--- /dev/null
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# This does not build a driver, but instead holds common files for
+# the crypto and compression drivers.
+build = false
+qat_deps = ['bus_pci']
+qat_sources = files('qat_common.c',
+               'qat_qp.c',
+               'qat_device.c',
+               'qat_logs.c')
+qat_includes = [include_directories('.', 'qat_adf')]
+qat_ext_deps = []
+qat_cflags = []
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
new file mode 100644 (file)
index 0000000..1eef551
--- /dev/null
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include <rte_io.h>
+
+/* CSR write macro */
+#define ADF_CSR_WR(csrAddr, csrOffset, val)            \
+       rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ADF_CSR_RD(csrAddr, csrOffset)                 \
+       rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+#define ADF_RING_EMPTY_SIG_BYTE 0x7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Maximum number of qps on a device for any service type */
+#define ADF_MAX_QPS_ON_ANY_SERVICE     2
+#define ADF_RING_DIR_TX                        0
+#define ADF_RING_DIR_RX                        1
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+                               ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+                               SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+       ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
+#define BUILD_RING_CONFIG(size)        \
+       ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+       ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+       ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+       uint32_t l_base = 0, u_base = 0; \
+       l_base = (uint32_t)(value & 0xFFFFFFFF); \
+       u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+       ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0);  \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+       ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_COL_CTL, \
+                       ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+#endif /* ADF_TRANSPORT_ACCESS_MACROS_H */
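
To illustrate how the accessors above compose, here is a minimal, hypothetical sketch (not part of this commit) of driving one ring through these macros. It assumes csr_base_addr points at an already-mapped bank BAR and that queue_size_bytes is a power of two; the example_* helpers are invented for illustration.

    #include <stdint.h>
    #include "adf_transport_access_macros.h"

    /* Poll the head pointer the device advances as it consumes requests. */
    static uint32_t
    example_read_head(void *csr_base_addr, uint32_t bank, uint32_t ring)
    {
            return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
    }

    /* Publish a new tail after writing one message into the ring. */
    static void
    example_advance_tail(void *csr_base_addr, uint32_t bank, uint32_t ring,
                    uint32_t tail, uint32_t queue_size_bytes,
                    uint32_t msg_size_bytes)
    {
            /* Wrap the shadow tail modulo the ring's byte size. */
            tail = (tail + msg_size_bytes) & (queue_size_bytes - 1);
            WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, tail);
    }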
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
new file mode 100644 (file)
index 0000000..ae39b7f
--- /dev/null
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <sys/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+               (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+       (((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+       ICP_QAT_FW_COMN_RESP_SERV_NULL,
+       ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+       ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+       ICP_QAT_FW_COMN_REQ_NULL = 0,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+       ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t serv_specif_fields[4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+       uint64_t opaque_data;
+       uint64_t src_data_addr;
+       uint64_t dest_data_addr;
+       uint32_t src_length;
+       uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+       uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+       uint8_t resrvd1;
+       uint8_t service_cmd_id;
+       uint8_t service_type;
+       uint8_t hdr_flags;
+       uint16_t serv_specif_flags;
+       uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+       uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+       uint8_t xlat_err_code;
+       uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+       uint8_t resrvd1;
+       uint8_t service_id;
+       uint8_t response_type;
+       uint8_t hdr_flags;
+       struct icp_qat_fw_comn_error comn_error;
+       uint8_t comn_status;
+       uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_hdr;
+       uint64_t opaque_data;
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+       (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+       (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+        ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+       ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+        | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+                       QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+        & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+       ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+       QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+       (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+       QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+       (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+       QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+       (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+       QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+       QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+       QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+       ICP_QAT_FW_SLICE_NULL = 0,
+       ICP_QAT_FW_SLICE_CIPHER = 1,
+       ICP_QAT_FW_SLICE_AUTH = 2,
+       ICP_QAT_FW_SLICE_DRAM_RD = 3,
+       ICP_QAT_FW_SLICE_DRAM_WR = 4,
+       ICP_QAT_FW_SLICE_COMP = 5,
+       ICP_QAT_FW_SLICE_XLAT = 6,
+       ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
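
As a worked example of the field macros above, the following hypothetical snippet (not part of this commit) initialises a common request header and checks the crypto status bit on the matching response; the example_* names are invented.

    #include <stdint.h>
    #include "icp_qat_fw.h"

    static void
    example_build_comn_hdr(struct icp_qat_fw_comn_req_hdr *hdr)
    {
            /* Symmetric-crypto (LA) request. */
            hdr->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
            /* Set only the 'valid' bit (bit 7) in hdr_flags. */
            hdr->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(
                            ICP_QAT_FW_COMN_REQ_FLAG_SET);
            /* Content descriptor by 64-bit pointer, flat (non-SGL) buffers. */
            hdr->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
                            QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                            QAT_COMN_PTR_TYPE_FLAT);
    }

    static int
    example_resp_ok(const struct icp_qat_fw_comn_resp *resp)
    {
            /* 0 in the crypto status bit means firmware reported success. */
            return ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
                            resp->comn_hdr.comn_status) ==
                            ICP_QAT_FW_COMN_STATUS_FLAG_OK;
    }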
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
new file mode 100644 (file)
index 0000000..c33bc3f
--- /dev/null
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+       ICP_QAT_FW_LA_CMD_CIPHER = 0,
+       ICP_QAT_FW_LA_CMD_AUTH = 1,
+       ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+       ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+       ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+       ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+       ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+       ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+       ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+       ICP_QAT_FW_LA_CMD_MGF1 = 9,
+       ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+       ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+       ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO        2
+#define ICP_QAT_FW_LA_CCM_PROTO        1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK   0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+       cmp_auth, ret_auth, update_state, \
+       ciph_iv, ciphcfg, partial) \
+       (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+       ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+       QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+       ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+       QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+       ((proto & QAT_LA_PROTO_MASK) << \
+       QAT_LA_PROTO_BITPOS)    | \
+       ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+       QAT_LA_CMP_AUTH_RES_BITPOS) | \
+       ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+       QAT_LA_RET_AUTH_RES_BITPOS) | \
+       ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+       QAT_LA_UPDATE_STATE_BITPOS) | \
+       ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+       QAT_LA_CIPH_IV_FLD_BITPOS) | \
+       ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+       ((partial & QAT_LA_PARTIAL_MASK) << \
+       QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+       QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } sl;
+       } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+       uint8_t cipher_state_sz;
+       uint8_t cipher_key_sz;
+       uint8_t cipher_cfg_offset;
+       uint8_t next_curr_id;
+       uint8_t cipher_padding_sz;
+       uint8_t resrvd1;
+       uint16_t resrvd2;
+       uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+       uint32_t resrvd1;
+       uint8_t resrvd2;
+       uint8_t hash_flags;
+       uint8_t hash_cfg_offset;
+       uint8_t next_curr_id;
+       uint8_t resrvd3;
+       uint8_t outer_prefix_sz;
+       uint8_t final_sz;
+       uint8_t inner_res_sz;
+       uint8_t resrvd4;
+       uint8_t inner_state1_sz;
+       uint8_t inner_state2_offset;
+       uint8_t inner_state2_sz;
+       uint8_t outer_config_offset;
+       uint8_t outer_state1_sz;
+       uint8_t outer_res_sz;
+       uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+       uint8_t cipher_state_sz;
+       uint8_t cipher_key_sz;
+       uint8_t cipher_cfg_offset;
+       uint8_t next_curr_id_cipher;
+       uint8_t cipher_padding_sz;
+       uint8_t hash_flags;
+       uint8_t hash_cfg_offset;
+       uint8_t next_curr_id_auth;
+       uint8_t resrvd1;
+       uint8_t outer_prefix_sz;
+       uint8_t final_sz;
+       uint8_t inner_res_sz;
+       uint8_t resrvd2;
+       uint8_t inner_state1_sz;
+       uint8_t inner_state2_offset;
+       uint8_t inner_state2_sz;
+       uint8_t outer_config_offset;
+       uint8_t outer_state1_sz;
+       uint8_t outer_res_sz;
+       uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX  240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+       (sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+       uint32_t cipher_offset;
+       uint32_t cipher_length;
+       union {
+               uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       uint64_t cipher_IV_ptr;
+                       uint64_t resrvd1;
+               } s;
+       } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+       uint32_t auth_off;
+       uint32_t auth_len;
+       union {
+               uint64_t auth_partial_st_prefix;
+               uint64_t aad_adr;
+       } u1;
+       uint64_t auth_res_addr;
+       union {
+               uint8_t inner_prefix_sz;
+               uint8_t aad_sz;
+       } u2;
+       uint8_t resrvd1;
+       uint8_t hash_state_sz;
+       uint8_t auth_res_sz;
+} __rte_packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+       union {
+               uint8_t inner_prefix_sz;
+               uint8_t aad_sz;
+       } u2;
+       uint8_t resrvd1;
+       uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_resp;
+       uint64_t opaque_data;
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+         ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
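
For illustration, a hypothetical use of ICP_QAT_FW_LA_FLAGS_BUILD (not part of this commit): the serv_specif_flags for a full, non-partial AES-GCM request with a 12-byte IV that returns (rather than verifies) the digest.

    #include <stdint.h>
    #include <rte_common.h>  /* for __rte_packed used by the header */
    #include "icp_qat_fw_la.h"

    static uint16_t
    example_gcm_la_flags(void)
    {
            return ICP_QAT_FW_LA_FLAGS_BUILD(
                    0,                                  /* not ZUC */
                    ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS, /* 12-byte IV */
                    ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER,  /* digest out of place */
                    ICP_QAT_FW_LA_GCM_PROTO,            /* GCM protocol */
                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES,      /* don't compare ICV */
                    ICP_QAT_FW_LA_RET_AUTH_RES,         /* return ICV */
                    ICP_QAT_FW_LA_NO_UPDATE_STATE,
                    ICP_QAT_FW_CIPH_IV_64BIT_PTR,       /* IV passed by pointer */
                    ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP,
                    ICP_QAT_FW_LA_PARTIAL_NONE);        /* full packet */
    }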
diff --git a/drivers/common/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h
new file mode 100644 (file)
index 0000000..56e3cf7
--- /dev/null
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+       ICP_QAT_HW_AE_0 = 0,
+       ICP_QAT_HW_AE_1 = 1,
+       ICP_QAT_HW_AE_2 = 2,
+       ICP_QAT_HW_AE_3 = 3,
+       ICP_QAT_HW_AE_4 = 4,
+       ICP_QAT_HW_AE_5 = 5,
+       ICP_QAT_HW_AE_6 = 6,
+       ICP_QAT_HW_AE_7 = 7,
+       ICP_QAT_HW_AE_8 = 8,
+       ICP_QAT_HW_AE_9 = 9,
+       ICP_QAT_HW_AE_10 = 10,
+       ICP_QAT_HW_AE_11 = 11,
+       ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+       ICP_QAT_HW_QAT_0 = 0,
+       ICP_QAT_HW_QAT_1 = 1,
+       ICP_QAT_HW_QAT_2 = 2,
+       ICP_QAT_HW_QAT_3 = 3,
+       ICP_QAT_HW_QAT_4 = 4,
+       ICP_QAT_HW_QAT_5 = 5,
+       ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+       ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+       ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+       ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+       ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+       ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+       ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+       ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+       ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+       ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+       ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+       ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+       ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+       ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+       ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+       ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+       ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+       ICP_QAT_HW_AUTH_MODE0 = 0,
+       ICP_QAT_HW_AUTH_MODE1 = 1,
+       ICP_QAT_HW_AUTH_MODE2 = 2,
+       ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+       uint32_t config;
+       uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+       (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+       ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+       (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+        QAT_AUTH_ALGO_SHA3_BITPOS) | \
+        (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+       (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+       & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+       ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+       uint32_t counter;
+       uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+       (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+       struct icp_qat_hw_auth_config auth_config;
+       struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+       ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+       struct icp_qat_hw_auth_setup inner_setup;
+       uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+       struct icp_qat_hw_auth_setup outer_setup;
+       uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+       struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+       ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+       ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+       ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+       ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+       ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+       ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+       ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+       ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+       ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+       ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+       ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+       ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+       ICP_QAT_HW_CIPHER_F8_MODE = 3,
+       ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+       ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+       uint32_t val;
+       uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+       ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+       ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_auth_op {
+       ICP_QAT_HW_AUTH_VERIFY = 0,
+       ICP_QAT_HW_AUTH_GENERATE = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+       ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+       ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+       (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+       ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+       ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+       ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+
+#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
+
+/* These defines describe the positions of the bit-fields
+ * in the flags byte of the CCM B0 block
+ */
+#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT      6
+#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT          3
+
+#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q)                  \
+       ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
+       | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
+       | ((q) - 1))
+
+#define ICP_QAT_HW_CCM_NQ_CONST 15
+#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
+#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
+#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
+               ICP_QAT_HW_CCM_AAD_LEN_INFO)
+#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
+#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
+#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
+
+struct icp_qat_hw_cipher_algo_blk {
+       struct icp_qat_hw_cipher_config cipher_config;
+       uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
+} __rte_cache_aligned;
+
+#endif
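
As a small, hypothetical example (not part of this commit), composing the 32-bit cipher configuration word for AES-128 in CBC mode, encrypt direction, without firmware key conversion:

    #include <rte_common.h>  /* for __rte_cache_aligned used by the header */
    #include "icp_qat_hw.h"

    static uint32_t
    example_aes128_cbc_encrypt_config(void)
    {
            /* mode | algo | key-convert | direction, packed into one word */
            return ICP_QAT_HW_CIPHER_CONFIG_BUILD(
                            ICP_QAT_HW_CIPHER_CBC_MODE,
                            ICP_QAT_HW_CIPHER_ALGO_AES128,
                            ICP_QAT_HW_CIPHER_NO_CONVERT,
                            ICP_QAT_HW_CIPHER_ENCRYPT);
    }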
diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c
new file mode 100644 (file)
index 0000000..c206d3b
--- /dev/null
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "qat_common.h"
+#include "qat_device.h"
+#include "qat_logs.h"
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
+               struct qat_sgl *list, uint32_t data_len)
+{
+       int nr = 1;
+
+       uint32_t buf_len = rte_pktmbuf_iova(buf) -
+                       buf_start + rte_pktmbuf_data_len(buf);
+
+       list->buffers[0].addr = buf_start;
+       list->buffers[0].resrvd = 0;
+       list->buffers[0].len = buf_len;
+
+       if (data_len <= buf_len) {
+               list->num_bufs = nr;
+               list->buffers[0].len = data_len;
+               return 0;
+       }
+
+       buf = buf->next;
+       while (buf) {
+               if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+                       QAT_LOG(ERR,
+                               "QAT PMD exceeded max number of QAT SGL entries (%u)",
+                                       QAT_SGL_MAX_NUMBER);
+                       return -EINVAL;
+               }
+
+               list->buffers[nr].len = rte_pktmbuf_data_len(buf);
+               list->buffers[nr].resrvd = 0;
+               list->buffers[nr].addr = rte_pktmbuf_iova(buf);
+
+               buf_len += list->buffers[nr].len;
+               buf = buf->next;
+
+               if (buf_len > data_len) {
+                       list->buffers[nr].len -=
+                               buf_len - data_len;
+                       buf = NULL;
+               }
+               ++nr;
+       }
+       list->num_bufs = nr;
+
+       return 0;
+}
+
+void qat_stats_get(struct qat_pci_device *dev,
+               struct qat_common_stats *stats,
+               enum qat_service_type service)
+{
+       int i;
+       struct qat_qp **qp;
+
+       if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) {
+               QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d",
+                               stats, dev, service);
+               return;
+       }
+
+       qp = dev->qps_in_use[service];
+       for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+               if (qp[i] == NULL) {
+                       QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+                                       service, i);
+                       continue;
+               }
+
+               stats->enqueued_count += qp[i]->stats.enqueued_count;
+               stats->dequeued_count += qp[i]->stats.dequeued_count;
+               stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
+               stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
+       }
+}
+
+void qat_stats_reset(struct qat_pci_device *dev,
+               enum qat_service_type service)
+{
+       int i;
+       struct qat_qp **qp;
+
+       if (dev == NULL || service >= QAT_SERVICE_INVALID) {
+               QAT_LOG(ERR, "invalid param: dev %p, service %d",
+                               dev, service);
+               return;
+       }
+
+       qp = dev->qps_in_use[service];
+       for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+               if (qp[i] == NULL) {
+                       QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+                                       service, i);
+                       continue;
+               }
+               memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
+       }
+
+       QAT_LOG(DEBUG, "QAT: %d stats cleared", service);
+}
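
A hypothetical caller of qat_sgl_fill_array (not part of this commit), describing data_len bytes of a possibly-chained mbuf to the device starting at the mbuf's current data pointer; example_fill_sgl is an invented name.

    #include <rte_mbuf.h>
    #include "qat_common.h"

    static int
    example_fill_sgl(struct rte_mbuf *m, uint32_t data_len,
                    struct qat_sgl *sgl)
    {
            /* IOVA of the first byte the device should read. */
            uint64_t start = rte_pktmbuf_iova(m);

            return qat_sgl_fill_array(m, start, sgl, data_len);
    }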
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
new file mode 100644 (file)
index 0000000..db85d54
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_COMMON_H_
+#define _QAT_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+/** Intel(R) QAT device name for PCI registration */
+#define QAT_PCI_NAME   qat
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SGL_MAX_NUMBER     16
+
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+
+/* Intel(R) QuickAssist Technology device generations are enumerated
+ * from 1; the value matches the hardware generation of the device
+ */
+enum qat_device_gen {
+       QAT_GEN1 = 1,
+       QAT_GEN2
+};
+
+enum qat_service_type {
+       QAT_SERVICE_ASYMMETRIC = 0,
+       QAT_SERVICE_SYMMETRIC,
+       QAT_SERVICE_COMPRESSION,
+       QAT_SERVICE_INVALID
+};
+#define QAT_MAX_SERVICES               (QAT_SERVICE_INVALID)
+
+/** Common struct for scatter-gather list operations */
+struct qat_flat_buf {
+       uint32_t len;
+       uint32_t resrvd;
+       uint64_t addr;
+} __rte_packed;
+
+struct qat_sgl {
+       uint64_t resrvd;
+       uint32_t num_bufs;
+       uint32_t num_mapped_bufs;
+       struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+/** Common, i.e. not service-specific, statistics */
+struct qat_common_stats {
+       uint64_t enqueued_count;
+       /**< Count of all operations enqueued */
+       uint64_t dequeued_count;
+       /**< Count of all operations dequeued */
+
+       uint64_t enqueue_err_count;
+       /**< Total error count on operations enqueued */
+       uint64_t dequeue_err_count;
+       /**< Total error count on operations dequeued */
+};
+
+struct qat_pci_device;
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
+               struct qat_sgl *list, uint32_t data_len);
+void
+qat_stats_get(struct qat_pci_device *dev,
+               struct qat_common_stats *stats,
+               enum qat_service_type service);
+void
+qat_stats_reset(struct qat_pci_device *dev,
+               enum qat_service_type service);
+
+#endif /* _QAT_COMMON_H_ */
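
A hypothetical sketch (not part of this commit) of how a service PMD might surface the common stats; note that qat_stats_get() accumulates into the caller's struct, so it must be zeroed first.

    #include <string.h>
    #include "qat_common.h"

    static void
    example_collect_sym_stats(struct qat_pci_device *qat_dev,
                    struct qat_common_stats *out)
    {
            memset(out, 0, sizeof(*out));
            qat_stats_get(qat_dev, out, QAT_SERVICE_SYMMETRIC);
    }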
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
new file mode 100644 (file)
index 0000000..64f236e
--- /dev/null
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "qat_device.h"
+#include "adf_transport_access_macros.h"
+#include "qat_sym_pmd.h"
+
+/* Hardware device information per generation */
+__extension__
+struct qat_gen_hw_data qat_gen_config[] =  {
+       [QAT_GEN1] = {
+               .dev_gen = QAT_GEN1,
+               .qp_hw_data = qat_gen1_qps,
+       },
+       [QAT_GEN2] = {
+               .dev_gen = QAT_GEN2,
+               .qp_hw_data = qat_gen1_qps,
+               /* gen2 has same ring layout as gen1 */
+       },
+};
+
+
+static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES];
+static int qat_nb_pci_devices;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+
+static const struct rte_pci_id pci_id_qat_map[] = {
+               {
+                       RTE_PCI_DEVICE(0x8086, 0x0443),
+               },
+               {
+                       RTE_PCI_DEVICE(0x8086, 0x37c9),
+               },
+               {
+                       RTE_PCI_DEVICE(0x8086, 0x19e3),
+               },
+               {
+                       RTE_PCI_DEVICE(0x8086, 0x6f55),
+               },
+               {.device_id = 0},
+};
+
+
+static struct qat_pci_device *
+qat_pci_get_dev(uint8_t dev_id)
+{
+       return &qat_pci_devices[dev_id];
+}
+
+static struct qat_pci_device *
+qat_pci_get_named_dev(const char *name)
+{
+       struct qat_pci_device *dev;
+       unsigned int i;
+
+       if (name == NULL)
+               return NULL;
+
+       for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
+               dev = &qat_pci_devices[i];
+
+               if ((dev->attached == QAT_ATTACHED) &&
+                               (strcmp(dev->name, name) == 0))
+                       return dev;
+       }
+
+       return NULL;
+}
+
+static uint8_t
+qat_pci_find_free_device_index(void)
+{
+       uint8_t dev_id;
+
+       for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) {
+               if (qat_pci_devices[dev_id].attached == QAT_DETACHED)
+                       break;
+       }
+       return dev_id;
+}
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev)
+{
+       char name[QAT_DEV_NAME_MAX_LEN];
+
+       rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+       return qat_pci_get_named_dev(name);
+}
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev)
+{
+       struct qat_pci_device *qat_dev;
+       uint8_t qat_dev_id;
+       char name[QAT_DEV_NAME_MAX_LEN];
+
+       rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+       snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+       if (qat_pci_get_named_dev(name) != NULL) {
+               QAT_LOG(ERR, "QAT device with name %s already allocated!",
+                               name);
+               return NULL;
+       }
+
+       qat_dev_id = qat_pci_find_free_device_index();
+       if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) {
+               QAT_LOG(ERR, "Reached maximum number of QAT devices");
+               return NULL;
+       }
+
+       qat_dev = qat_pci_get_dev(qat_dev_id);
+       memset(qat_dev, 0, sizeof(*qat_dev));
+       snprintf(qat_dev->name, QAT_DEV_NAME_MAX_LEN, "%s", name);
+       qat_dev->qat_dev_id = qat_dev_id;
+       qat_dev->pci_dev = pci_dev;
+       switch (qat_dev->pci_dev->id.device_id) {
+       case 0x0443:
+               qat_dev->qat_dev_gen = QAT_GEN1;
+               break;
+       case 0x37c9:
+       case 0x19e3:
+       case 0x6f55:
+               qat_dev->qat_dev_gen = QAT_GEN2;
+               break;
+       default:
+               QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
+               return NULL;
+       }
+
+       rte_spinlock_init(&qat_dev->arb_csr_lock);
+
+       qat_dev->attached = QAT_ATTACHED;
+
+       qat_nb_pci_devices++;
+
+       QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d",
+                       qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
+
+       return qat_dev;
+}
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev)
+{
+       struct qat_pci_device *qat_dev;
+       char name[QAT_DEV_NAME_MAX_LEN];
+
+       if (pci_dev == NULL)
+               return -EINVAL;
+
+       rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+       snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+       qat_dev = qat_pci_get_named_dev(name);
+       if (qat_dev != NULL) {
+
+               /* Check that there are no service devs still on pci device */
+               if (qat_dev->sym_dev != NULL)
+                       return -EBUSY;
+
+               qat_dev->attached = QAT_DETACHED;
+               qat_nb_pci_devices--;
+       }
+       QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
+                               name, qat_nb_pci_devices);
+       return 0;
+}
+
+static int
+qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev,
+               struct rte_pci_device *pci_dev)
+{
+       qat_sym_dev_destroy(qat_pci_dev);
+       qat_comp_dev_destroy(qat_pci_dev);
+       qat_asym_dev_destroy(qat_pci_dev);
+       return qat_pci_device_release(pci_dev);
+}
+
+static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+               struct rte_pci_device *pci_dev)
+{
+       int ret = 0;
+       struct qat_pci_device *qat_pci_dev;
+
+       QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
+                       pci_dev->addr.bus,
+                       pci_dev->addr.devid,
+                       pci_dev->addr.function);
+
+       qat_pci_dev = qat_pci_device_allocate(pci_dev);
+       if (qat_pci_dev == NULL)
+               return -ENODEV;
+
+       ret = qat_sym_dev_create(qat_pci_dev);
+       if (ret != 0)
+               goto error_out;
+
+       ret = qat_comp_dev_create(qat_pci_dev);
+       if (ret != 0)
+               goto error_out;
+
+       ret = qat_asym_dev_create(qat_pci_dev);
+       if (ret != 0)
+               goto error_out;
+
+       return 0;
+
+error_out:
+       qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+       return ret;
+}
+
+static int qat_pci_remove(struct rte_pci_device *pci_dev)
+{
+       struct qat_pci_device *qat_pci_dev;
+
+       if (pci_dev == NULL)
+               return -EINVAL;
+
+       qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev);
+       if (qat_pci_dev == NULL)
+               return 0;
+
+       return qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+}
+
+static struct rte_pci_driver rte_qat_pmd = {
+       .id_table = pci_id_qat_map,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+       .probe = qat_pci_probe,
+       .remove = qat_pci_remove
+};
+
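+/*
+ * Weak no-op stubs for each service's create/destroy hooks: when a
+ * service PMD (sym crypto, asym crypto, compression) is linked in, its
+ * strong definition overrides the stub; otherwise the probe/remove
+ * paths above call the stub and that service is simply skipped.
+ */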
+__attribute__((weak)) int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+       return 0;
+}
+
+__attribute__((weak)) int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+       return 0;
+}
+
+__attribute__((weak)) int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+       return 0;
+}
+
+__attribute__((weak)) int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+       return 0;
+}
+
+__attribute__((weak)) int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+       return 0;
+}
+
+__attribute__((weak)) int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+       return 0;
+}
+
+RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
new file mode 100644 (file)
index 0000000..0cb370c
--- /dev/null
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_DEVICE_H_
+#define _QAT_DEVICE_H_
+
+#include <rte_bus_pci.h>
+
+#include "qat_common.h"
+#include "qat_logs.h"
+#include "adf_transport_access_macros.h"
+#include "qat_qp.h"
+
+#define QAT_DETACHED  (0)
+#define QAT_ATTACHED  (1)
+
+#define QAT_DEV_NAME_MAX_LEN   64
+
+struct qat_sym_dev_private;
+
+/*
+ * This struct holds all the data about a QAT pci device
+ * including data about all services it supports.
+ * It contains
+ *  - hw_data
+ *  - config data
+ *  - runtime data
+ */
+struct qat_pci_device {
+
+       /* Data used by all services */
+       char name[QAT_DEV_NAME_MAX_LEN];
+       /**< Name of qat pci device */
+       uint8_t qat_dev_id;
+       /**< Device instance for this qat pci device */
+       struct rte_pci_device *pci_dev;
+       /**< PCI information. */
+       enum qat_device_gen qat_dev_gen;
+       /**< QAT device generation */
+       rte_spinlock_t arb_csr_lock;
+       /**< lock to protect accesses to the arbiter CSR */
+       __extension__
+       uint8_t attached : 1;
+       /**< Flag indicating the device is attached */
+
+       struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE];
+       /**< links to qps set up for each service, index same as on API */
+
+       /* Data relating to symmetric crypto service */
+       struct qat_sym_dev_private *sym_dev;
+       /**< link back to cryptodev private data */
+       struct rte_device sym_rte_dev;
+       /**< This represents the crypto subset of this pci device.
+        * Register with this rather than with the one in
+        * pci_dev so that its driver can have a crypto-specific name
+        */
+
+       /* Data relating to compression service */
+
+       /* Data relating to asymmetric crypto service */
+
+};
+
+struct qat_gen_hw_data {
+       enum qat_device_gen dev_gen;
+       const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE];
+};
+
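+/* Table of hw config per device generation, indexed by enum qat_device_gen */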
+extern struct qat_gen_hw_data qat_gen_config[];
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev);
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev);
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev);
+
+/* declarations needed for weak functions */
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+#endif /* _QAT_DEVICE_H_ */
diff --git a/drivers/common/qat/qat_logs.c b/drivers/common/qat/qat_logs.c
new file mode 100644 (file)
index 0000000..fa3df85
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_log.h>
+#include <rte_hexdump.h>
+
+#include "qat_logs.h"
+
+int qat_gen_logtype;
+int qat_dp_logtype;
+
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+               const void *buf, unsigned int len)
+{
+       if (level > rte_log_get_global_level())
+               return 0;
+       if (level > (uint32_t)(rte_log_get_level(logtype)))
+               return 0;
+
+       rte_hexdump(rte_logs.file == NULL ? stderr : rte_logs.file,
+                               title, buf, len);
+       return 0;
+}
+
+RTE_INIT(qat_pci_init_log);
+static void
+qat_pci_init_log(void)
+{
+       /* Non-data-path logging for pci device and all services */
+       qat_gen_logtype = rte_log_register("pmd.qat_general");
+       if (qat_gen_logtype >= 0)
+               rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE);
+
+       /* data-path logging for all services */
+       qat_dp_logtype = rte_log_register("pmd.qat_dp");
+       if (qat_dp_logtype >= 0)
+               rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/common/qat/qat_logs.h b/drivers/common/qat/qat_logs.h
new file mode 100644 (file)
index 0000000..4baea12
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_LOGS_H_
+#define _QAT_LOGS_H_
+
+#include <rte_log.h>
+
+extern int qat_gen_logtype;
+extern int qat_dp_logtype;
+
+#define QAT_LOG(level, fmt, args...)                   \
+       rte_log(RTE_LOG_ ## level, qat_gen_logtype,             \
+                       "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_LOG(level, fmt, args...)                        \
+       rte_log(RTE_LOG_ ## level, qat_dp_logtype,              \
+                       "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_HEXDUMP_LOG(level, title, buf, len)             \
+       qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len)
+
+/**
+ * qat_hexdump_log - Dump out memory in a special hex dump format.
+ *
+ * Dump out the message buffer in a special hex dump output format with
+ * characters printed for each line of 16 hex values. The message is sent
+ * to the stream defined by rte_logs.file, or to stderr if rte_logs.file
+ * is undefined.
+ */
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+               const void *buf, unsigned int len);
+
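+/*
+ * Illustrative usage of the macros above (names as used elsewhere in
+ * this driver):
+ *
+ *   QAT_LOG(ERR, "Failed to alloc mem for qp struct");
+ *   QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", resp_msg, rx_queue->msg_size);
+ */
+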
+#endif /* _QAT_LOGS_H_ */
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
new file mode 100644 (file)
index 0000000..32c1759
--- /dev/null
@@ -0,0 +1,635 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+
+#include "qat_logs.h"
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "qat_sym.h"
+#include "adf_transport_access_macros.h"
+
+
+#define ADF_MAX_DESC                           4096
+#define ADF_MIN_DESC                           128
+
+#define ADF_ARB_REG_SLOT                       0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET            0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+       (ADF_ARB_REG_SLOT * index), value)
+
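+/*
+ * Fixed ring assignment on gen1 devices: each service owns tx/rx ring
+ * pairs on hw bundle 0, with rx ring number = tx ring number + 8, and
+ * per-service request/response message sizes in bytes.
+ */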
+__extension__
+const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+                                        [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+       /* queue pairs which provide an asymmetric crypto service */
+       [QAT_SERVICE_ASYMMETRIC] = {
+               {
+                       .service_type = QAT_SERVICE_ASYMMETRIC,
+                       .hw_bundle_num = 0,
+                       .tx_ring_num = 0,
+                       .rx_ring_num = 8,
+                       .tx_msg_size = 64,
+                       .rx_msg_size = 32,
+
+               }, {
+                       .service_type = QAT_SERVICE_ASYMMETRIC,
+                       .hw_bundle_num = 0,
+                       .tx_ring_num = 1,
+                       .rx_ring_num = 9,
+                       .tx_msg_size = 64,
+                       .rx_msg_size = 32,
+               }
+       },
+       /* queue pairs which provide a symmetric crypto service */
+       [QAT_SERVICE_SYMMETRIC] = {
+               {
+                       .service_type = QAT_SERVICE_SYMMETRIC,
+                       .hw_bundle_num = 0,
+                       .tx_ring_num = 2,
+                       .rx_ring_num = 10,
+                       .tx_msg_size = 128,
+                       .rx_msg_size = 32,
+               },
+               {
+                       .service_type = QAT_SERVICE_SYMMETRIC,
+                       .hw_bundle_num = 0,
+                       .tx_ring_num = 3,
+                       .rx_ring_num = 11,
+                       .tx_msg_size = 128,
+                       .rx_msg_size = 32,
+               }
+       },
+       /* queue pairs which provide a compression service */
+       [QAT_SERVICE_COMPRESSION] = {
+               {
+                       .service_type = QAT_SERVICE_COMPRESSION,
+                       .hw_bundle_num = 0,
+                       .tx_ring_num = 6,
+                       .rx_ring_num = 14,
+                       .tx_msg_size = 128,
+                       .rx_msg_size = 32,
+               }, {
+                       .service_type = QAT_SERVICE_COMPRESSION,
+                       .hw_bundle_num = 0,
+                       .tx_ring_num = 7,
+                       .rx_ring_num = 15,
+                       .tx_msg_size = 128,
+                       .rx_msg_size = 32,
+               }
+       }
+};
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+       uint32_t queue_size_bytes);
+static void qat_queue_delete(struct qat_queue *queue);
+static int qat_queue_create(struct qat_pci_device *qat_dev,
+       struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+       uint32_t *queue_size_for_csr);
+static void adf_configure_queues(struct qat_qp *queue);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+       rte_spinlock_t *lock);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+       rte_spinlock_t *lock);
+
+
+int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+               enum qat_service_type service)
+{
+       int i, count;
+
+       for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
+               if (qp_hw_data[i].service_type == service)
+                       count++;
+       return count;
+}
+
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+                       int socket_id)
+{
+       const struct rte_memzone *mz;
+
+       mz = rte_memzone_lookup(queue_name);
+       if (mz != NULL) {
+               if (((size_t)queue_size <= mz->len) &&
+                               ((socket_id == SOCKET_ID_ANY) ||
+                                       (socket_id == mz->socket_id))) {
+                       QAT_LOG(DEBUG, "re-use memzone already "
+                                       "allocated for %s", queue_name);
+                       return mz;
+               }
+
+               QAT_LOG(ERR, "Incompatible memzone already "
+                               "allocated %s, size %u, socket %d. "
+                               "Requested size %u, socket %d",
+                               queue_name, (uint32_t)mz->len,
+                               mz->socket_id, queue_size, socket_id);
+               return NULL;
+       }
+
+       QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
+                                       queue_name, queue_size, socket_id);
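+       /*
+        * The ring base address must be naturally aligned to the ring size
+        * (qat_qp_check_queue_alignment relies on this), hence the request
+        * for an IOVA-contiguous memzone aligned to queue_size.
+        */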
+       return rte_memzone_reserve_aligned(queue_name, queue_size,
+               socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+int qat_qp_setup(struct qat_pci_device *qat_dev,
+               struct qat_qp **qp_addr,
+               uint16_t queue_pair_id,
+               struct qat_qp_config *qat_qp_conf)
+{
+       struct qat_qp *qp;
+       struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+       char op_cookie_pool_name[RTE_RING_NAMESIZE];
+       uint32_t i;
+
+       QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
+               queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);
+
+       if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
+               (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
+               QAT_LOG(ERR, "Can't create qp for %u descriptors",
+                               qat_qp_conf->nb_descriptors);
+               return -EINVAL;
+       }
+
+       if (pci_dev->mem_resource[0].addr == NULL) {
+               QAT_LOG(ERR, "Could not find VF config space "
+                               "(UIO driver attached?).");
+               return -EINVAL;
+       }
+
+       /* Allocate the queue pair data structure. */
+       qp = rte_zmalloc("qat PMD qp metadata",
+                       sizeof(*qp), RTE_CACHE_LINE_SIZE);
+       if (qp == NULL) {
+               QAT_LOG(ERR, "Failed to alloc mem for qp struct");
+               return -ENOMEM;
+       }
+       qp->nb_descriptors = qat_qp_conf->nb_descriptors;
+       qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+                       qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+                       RTE_CACHE_LINE_SIZE);
+       if (qp->op_cookies == NULL) {
+               QAT_LOG(ERR, "Failed to alloc mem for cookie");
+               rte_free(qp);
+               return -ENOMEM;
+       }
+
+       qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
+       qp->inflights16 = 0;
+
+       if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
+                                       ADF_RING_DIR_TX) != 0) {
+               QAT_LOG(ERR, "Tx queue create failed "
+                               "queue_pair_id=%u", queue_pair_id);
+               goto create_err;
+       }
+
+       if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
+                                       ADF_RING_DIR_RX) != 0) {
+               QAT_LOG(ERR, "Rx queue create failed "
+                               "queue_pair_id=%u", queue_pair_id);
+               qat_queue_delete(&(qp->tx_q));
+               goto create_err;
+       }
+
+       adf_configure_queues(qp);
+       adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
+                                       &qat_dev->arb_csr_lock);
+
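+       /*
+        * Create the pool of op cookies (per-descriptor, service-specific
+        * scratch memory), or re-use it if a pool of that name already
+        * exists; one cookie is then pinned to each ring slot for the
+        * lifetime of the qp.
+        */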
+       snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
+                                       "%s%d_cookies_%s_qp%hu",
+               pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+               qat_qp_conf->service_str, queue_pair_id);
+
+       QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
+       qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+       if (qp->op_cookie_pool == NULL)
+               qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+                               qp->nb_descriptors,
+                               qat_qp_conf->cookie_size, 64, 0,
+                               NULL, NULL, NULL, NULL, qat_qp_conf->socket_id,
+                               0);
+       if (!qp->op_cookie_pool) {
+               QAT_LOG(ERR, "QAT PMD Cannot create op mempool");
+               goto create_err;
+       }
+
+       for (i = 0; i < qp->nb_descriptors; i++) {
+               if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+                       QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
+                       goto create_err;
+               }
+       }
+
+       qp->qat_dev_gen = qat_dev->qat_dev_gen;
+       qp->build_request = qat_qp_conf->build_request;
+       qp->service_type = qat_qp_conf->hw->service_type;
+       qp->qat_dev = qat_dev;
+
+       QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
+                       queue_pair_id, op_cookie_pool_name);
+
+       *qp_addr = qp;
+       return 0;
+
+create_err:
+       if (qp->op_cookie_pool)
+               rte_mempool_free(qp->op_cookie_pool);
+       rte_free(qp->op_cookies);
+       rte_free(qp);
+       return -EFAULT;
+}
+
+int qat_qp_release(struct qat_qp **qp_addr)
+{
+       struct qat_qp *qp = *qp_addr;
+       uint32_t i;
+
+       if (qp == NULL) {
+               QAT_LOG(DEBUG, "qp already freed");
+               return 0;
+       }
+
+       QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
+                               qp->qat_dev->qat_dev_id);
+
+       /* Don't free memory if there are still responses to be processed */
+       if (qp->inflights16 == 0) {
+               qat_queue_delete(&(qp->tx_q));
+               qat_queue_delete(&(qp->rx_q));
+       } else {
+               return -EAGAIN;
+       }
+
+       adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
+                                       &qp->qat_dev->arb_csr_lock);
+
+       for (i = 0; i < qp->nb_descriptors; i++)
+               rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+       if (qp->op_cookie_pool)
+               rte_mempool_free(qp->op_cookie_pool);
+
+       rte_free(qp->op_cookies);
+       rte_free(qp);
+       *qp_addr = NULL;
+       return 0;
+}
+
+
+static void qat_queue_delete(struct qat_queue *queue)
+{
+       const struct rte_memzone *mz;
+       int status = 0;
+
+       if (queue == NULL) {
+               QAT_LOG(DEBUG, "Invalid queue");
+               return;
+       }
+       QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
+                       queue->hw_queue_number, queue->memz_name);
+
+       mz = rte_memzone_lookup(queue->memz_name);
+       if (mz != NULL) {
+               /* Write an unused pattern to the queue memory. */
+               memset(queue->base_addr, 0x7F, queue->queue_size);
+               status = rte_memzone_free(mz);
+               if (status != 0)
+                       QAT_LOG(ERR, "Error %d on freeing queue %s",
+                                       status, queue->memz_name);
+       } else {
+               QAT_LOG(DEBUG, "queue %s doesn't exist",
+                               queue->memz_name);
+       }
+}
+
+static int
+qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
+               struct qat_qp_config *qp_conf, uint8_t dir)
+{
+       uint64_t queue_base;
+       void *io_addr;
+       const struct rte_memzone *qp_mz;
+       struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+       int ret = 0;
+       uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
+                       qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
+       uint32_t queue_size_bytes = qp_conf->nb_descriptors * desc_size;
+
+       queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
+       queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
+                       qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);
+
+       if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+               QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
+               return -EINVAL;
+       }
+
+       /*
+        * Allocate a memzone for the queue - create a unique name.
+        */
+       snprintf(queue->memz_name, sizeof(queue->memz_name),
+                       "%s_%d_%s_%s_%d_%d",
+               pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+               qp_conf->service_str, "qp_mem",
+               queue->hw_bundle_number, queue->hw_queue_number);
+       qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+                       qp_conf->socket_id);
+       if (qp_mz == NULL) {
+               QAT_LOG(ERR, "Failed to allocate ring memzone");
+               return -ENOMEM;
+       }
+
+       queue->base_addr = (char *)qp_mz->addr;
+       queue->base_phys_addr = qp_mz->iova;
+       if (qat_qp_check_queue_alignment(queue->base_phys_addr,
+                       queue_size_bytes)) {
+               QAT_LOG(ERR, "Invalid alignment on queue create "
+                                       "0x%"PRIx64,
+                                       queue->base_phys_addr);
+               ret = -EFAULT;
+               goto queue_create_err;
+       }
+
+       if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
+                       &(queue->queue_size)) != 0) {
+               QAT_LOG(ERR, "Invalid queue size");
+               ret = -EINVAL;
+               goto queue_create_err;
+       }
+
+       queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
+                                       ADF_BYTES_TO_MSG_SIZE(desc_size));
+       queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
+
+       if (queue->max_inflights < 2) {
+               QAT_LOG(ERR, "Invalid num inflights");
+               ret = -EINVAL;
+               goto queue_create_err;
+       }
+       queue->head = 0;
+       queue->tail = 0;
+       queue->msg_size = desc_size;
+
+       /*
+        * Write an unused pattern to the queue memory.
+        */
+       memset(queue->base_addr, 0x7F, queue_size_bytes);
+
+       queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+                                       queue->queue_size);
+
+       io_addr = pci_dev->mem_resource[0].addr;
+
+       WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+                       queue->hw_queue_number, queue_base);
+
+       QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
+               " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
+                       queue->memz_name,
+                       queue->queue_size, queue_size_bytes,
+                       qp_conf->nb_descriptors, desc_size,
+                       queue->max_inflights, queue->modulo_mask);
+
+       return 0;
+
+queue_create_err:
+       rte_memzone_free(qp_mz);
+       return ret;
+}
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+                                       uint32_t queue_size_bytes)
+{
+       if (((queue_size_bytes - 1) & phys_addr) != 0)
+               return -EINVAL;
+       return 0;
+}
+
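+/*
+ * The hw CSR encodes the ring size as a power-of-two code, so only a
+ * msg_size * msg_num product that maps exactly onto one of the valid
+ * ADF_RING_SIZE_* codes is accepted.
+ */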
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+       uint32_t *p_queue_size_for_csr)
+{
+       uint8_t i = ADF_MIN_RING_SIZE;
+
+       for (; i <= ADF_MAX_RING_SIZE; i++)
+               if ((msg_size * msg_num) ==
+                               (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+                       *p_queue_size_for_csr = i;
+                       return 0;
+               }
+       QAT_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
+       return -EINVAL;
+}
+
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+                                       rte_spinlock_t *lock)
+{
+       uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+                                       (ADF_ARB_REG_SLOT *
+                                                       txq->hw_bundle_number);
+       uint32_t value;
+
+       rte_spinlock_lock(lock);
+       value = ADF_CSR_RD(base_addr, arb_csr_offset);
+       value |= (0x01 << txq->hw_queue_number);
+       ADF_CSR_WR(base_addr, arb_csr_offset, value);
+       rte_spinlock_unlock(lock);
+}
+
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+                                       rte_spinlock_t *lock)
+{
+       uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+                                       (ADF_ARB_REG_SLOT *
+                                                       txq->hw_bundle_number);
+       uint32_t value;
+
+       rte_spinlock_lock(lock);
+       value = ADF_CSR_RD(base_addr, arb_csr_offset);
+       value &= ~(0x01 << txq->hw_queue_number);
+       ADF_CSR_WR(base_addr, arb_csr_offset, value);
+       rte_spinlock_unlock(lock);
+}
+
+static void adf_configure_queues(struct qat_qp *qp)
+{
+       uint32_t queue_config;
+       struct qat_queue *queue = &qp->tx_q;
+
+       queue_config = BUILD_RING_CONFIG(queue->queue_size);
+
+       WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+                       queue->hw_queue_number, queue_config);
+
+       queue = &qp->rx_q;
+       queue_config =
+                       BUILD_RESP_RING_CONFIG(queue->queue_size,
+                                       ADF_RING_NEAR_WATERMARK_512,
+                                       ADF_RING_NEAR_WATERMARK_0);
+
+       WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+                       queue->hw_queue_number, queue_config);
+}
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
+{
+       return data & modulo_mask;
+}
+
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
+{
+       WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+                       q->hw_queue_number, q->tail);
+       q->nb_pending_requests = 0;
+       q->csr_tail = q->tail;
+}
+
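+/*
+ * Overwrite the processed response descriptors (from the last head CSR
+ * write up to the current shadow head, handling wrap-around) with the
+ * empty-ring signature, then publish the new head to the CSR so the hw
+ * can reuse those slots.
+ */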
+static inline void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+       uint32_t old_head, new_head;
+       uint32_t max_head;
+
+       old_head = q->csr_head;
+       new_head = q->head;
+       max_head = qp->nb_descriptors * q->msg_size;
+
+       /* write out free descriptors */
+       void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+       if (new_head < old_head) {
+               memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+               memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
+       } else {
+               memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
+       }
+       q->nb_processed_responses = 0;
+       q->csr_head = new_head;
+
+       /* write current head to CSR */
+       WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+                           q->hw_queue_number, new_head);
+}
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+       register struct qat_queue *queue;
+       struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+       register uint32_t nb_ops_sent = 0;
+       register int ret;
+       uint16_t nb_ops_possible = nb_ops;
+       register uint8_t *base_addr;
+       register uint32_t tail;
+       int overflow;
+
+       if (unlikely(nb_ops == 0))
+               return 0;
+
+       /* read params used a lot in main loop into registers */
+       queue = &(tmp_qp->tx_q);
+       base_addr = (uint8_t *)queue->base_addr;
+       tail = queue->tail;
+
+       /* Find how many can actually fit on the ring */
+       tmp_qp->inflights16 += nb_ops;
+       overflow = tmp_qp->inflights16 - queue->max_inflights;
+       if (overflow > 0) {
+               tmp_qp->inflights16 -= overflow;
+               nb_ops_possible = nb_ops - overflow;
+               if (nb_ops_possible == 0)
+                       return 0;
+       }
+
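+       /*
+        * Each request is built in place on the tx ring at the shadow
+        * tail; the tail CSR itself is only written at kick_tail below.
+        */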
+       while (nb_ops_sent != nb_ops_possible) {
+               ret = tmp_qp->build_request(*ops, base_addr + tail,
+                               tmp_qp->op_cookies[tail / queue->msg_size],
+                               tmp_qp->qat_dev_gen);
+               if (ret != 0) {
+                       tmp_qp->stats.enqueue_err_count++;
+                       /*
+                        * This message cannot be enqueued,
+                        * remove the unsent ops from the inflight count
+                        */
+                       tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
+                       if (nb_ops_sent == 0)
+                               return 0;
+                       goto kick_tail;
+               }
+
+               tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
+               ops++;
+               nb_ops_sent++;
+       }
+kick_tail:
+       queue->tail = tail;
+       tmp_qp->stats.enqueued_count += nb_ops_sent;
+       queue->nb_pending_requests += nb_ops_sent;
+       if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+                   queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+               txq_write_tail(tmp_qp, queue);
+       }
+       return nb_ops_sent;
+}
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+       struct qat_queue *rx_queue, *tx_queue;
+       struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+       uint32_t head;
+       uint32_t resp_counter = 0;
+       uint8_t *resp_msg;
+
+       rx_queue = &(tmp_qp->rx_q);
+       tx_queue = &(tmp_qp->tx_q);
+       head = rx_queue->head;
+       resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+
+       while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+                       resp_counter != nb_ops) {
+
+               if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
+                       qat_sym_process_response(ops, resp_msg);
+               /* add qat_asym_process_response here */
+               /* add qat_comp_process_response here */
+
+               head = adf_modulo(head + rx_queue->msg_size,
+                                 rx_queue->modulo_mask);
+
+               resp_msg = (uint8_t *)rx_queue->base_addr + head;
+               ops++;
+               resp_counter++;
+       }
+       if (resp_counter > 0) {
+               rx_queue->head = head;
+               tmp_qp->stats.dequeued_count += resp_counter;
+               rx_queue->nb_processed_responses += resp_counter;
+               tmp_qp->inflights16 -= resp_counter;
+
+               if (rx_queue->nb_processed_responses >
+                                               QAT_CSR_HEAD_WRITE_THRESH)
+                       rxq_free_desc(tmp_qp, rx_queue);
+       }
+       /* also check if tail needs to be advanced */
+       if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+               tx_queue->tail != tx_queue->csr_tail) {
+               txq_write_tail(tmp_qp, tx_queue);
+       }
+       return resp_counter;
+}
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
new file mode 100644 (file)
index 0000000..59db945
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_QP_H_
+#define _QAT_QP_H_
+
+#include "qat_common.h"
+#include "adf_transport_access_macros.h"
+
+struct qat_pci_device;
+
+#define QAT_CSR_HEAD_WRITE_THRESH 32U
+/* number of requests to accumulate before writing head CSR */
+#define QAT_CSR_TAIL_WRITE_THRESH 32U
+/* number of requests to accumulate before writing tail CSR */
+#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
+/* number of inflights below which no tail write coalescing should occur */
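+
+/*
+ * CSR writes are coalesced: the rx head CSR is updated only after
+ * QAT_CSR_HEAD_WRITE_THRESH responses have been processed and the tx
+ * tail CSR only once QAT_CSR_TAIL_WRITE_THRESH requests are pending,
+ * except when fewer than QAT_CSR_TAIL_FORCE_WRITE_THRESH ops are in
+ * flight, in which case the tail is written on every enqueue so a
+ * nearly-idle ring is not left waiting.
+ */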
+
+typedef int (*build_request_t)(void *op,
+               uint8_t *req, void *op_cookie,
+               enum qat_device_gen qat_dev_gen);
+/**< Build a request from an op. */
+
+/**
+ * Structure with the hw ring configuration of a queue pair,
+ * fixed per device generation.
+ */
+struct qat_qp_hw_data {
+       enum qat_service_type service_type;
+       uint8_t hw_bundle_num;
+       uint8_t tx_ring_num;
+       uint8_t rx_ring_num;
+       uint16_t tx_msg_size;
+       uint16_t rx_msg_size;
+};
+/**
+ * Structure with data needed for creation of queue pair.
+ */
+struct qat_qp_config {
+       const struct qat_qp_hw_data *hw;
+       uint32_t nb_descriptors;
+       uint32_t cookie_size;
+       int socket_id;
+       build_request_t build_request;
+       const char *service_str;
+};
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+       char            memz_name[RTE_MEMZONE_NAMESIZE];
+       void            *base_addr;             /* Base address */
+       rte_iova_t      base_phys_addr;         /* Queue physical address */
+       uint32_t        head;                   /* Shadow copy of the head */
+       uint32_t        tail;                   /* Shadow copy of the tail */
+       uint32_t        modulo_mask;
+       uint32_t        msg_size;
+       uint16_t        max_inflights;
+       uint32_t        queue_size;
+       uint8_t         hw_bundle_number;
+       uint8_t         hw_queue_number;
+       /* HW queue aka ring offset on bundle */
+       uint32_t        csr_head;               /* last written head value */
+       uint32_t        csr_tail;               /* last written tail value */
+       uint16_t        nb_processed_responses;
+       /* number of responses processed since last CSR head write */
+       uint16_t        nb_pending_requests;
+       /* number of requests pending since last CSR tail write */
+};
+
+struct qat_qp {
+       void                    *mmap_bar_addr;
+       uint16_t                inflights16;
+       struct qat_queue        tx_q;
+       struct qat_queue        rx_q;
+       struct qat_common_stats stats;
+       struct rte_mempool *op_cookie_pool;
+       void **op_cookies;
+       uint32_t nb_descriptors;
+       enum qat_device_gen qat_dev_gen;
+       build_request_t build_request;
+       enum qat_service_type service_type;
+       struct qat_pci_device *qat_dev;
+       /**< qat device this qp is on */
+} __rte_cache_aligned;
+
+extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+int
+qat_qp_release(struct qat_qp **qp_addr);
+
+int
+qat_qp_setup(struct qat_pci_device *qat_dev,
+               struct qat_qp **qp_addr, uint16_t queue_pair_id,
+               struct qat_qp_config *qat_qp_conf);
+
+int
+qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+                       enum qat_service_type service);
+#endif /* _QAT_QP_H_ */
index 1d0c88e..c480cbd 100644 (file)
@@ -8,7 +8,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
deleted file mode 100644 (file)
index 2ad1085..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015-2018 Intel Corporation
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_pmd_qat.a
-
-# library version
-LIBABIVER := 1
-
-# build flags
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -O3
-
-# external library include paths
-CFLAGS += -I$(SRCDIR)/qat_adf
-
-# library common source files
-SRCS-y += qat_device.c
-SRCS-y += qat_common.c
-SRCS-y += qat_logs.c
-SRCS-y += qat_qp.c
-
-# library symmetric crypto source files
-LDLIBS += -lrte_cryptodev
-LDLIBS += -lcrypto
-SRCS-y += qat_sym.c
-SRCS-y += qat_sym_session.c
-SRCS-y += qat_sym_pmd.c
-
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
-LDLIBS += -lrte_pci -lrte_bus_pci
-
-# export include files
-SYMLINK-y-include +=
-
-# versioning export map
-EXPORT_MAP := rte_pmd_qat_version.map
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/README b/drivers/crypto/qat/README
new file mode 100644 (file)
index 0000000..444ae60
--- /dev/null
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+The Makefile for the QAT crypto PMD is in the common/qat directory.
+The QAT driver is built from there, as only one library is built for the
+whole QAT pci device and that library includes all of the services
+(crypto, compression) which are enabled on the device.
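+
+As before, the crypto service is enabled via the existing config option
+(shown here for illustration; this is the option name used in
+drivers/Makefile):
+
+    CONFIG_RTE_LIBRTE_PMD_QAT=y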
index 0675426..2873637 100644 (file)
@@ -1,20 +1,24 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017-2018 Intel Corporation
 
-dep = dependency('libcrypto', required: false)
-
 build = false
-sources = files('qat_common.c',
-               'qat_qp.c',
-               'qat_device.c',
-               'qat_logs.c')
-
+dep = dependency('libcrypto', required: false)
 if dep.found()
-       sources += files('qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c')
+       # Add our source files to the list
+       qat_sources += files('qat_sym_pmd.c',
+                            'qat_sym.c',
+                            'qat_sym_session.c')
+       qat_includes += include_directories('.')
+       qat_deps += 'cryptodev'
+       qat_ext_deps += dep
        pkgconfig_extra_libs += '-lcrypto'
+       qat_cflags += '-DBUILD_QAT_SYM'
+
+       # build the whole driver
+       sources += qat_sources
+       cflags += qat_cflags
+       deps += qat_deps
+       ext_deps += qat_ext_deps
+       includes += qat_includes
        build = true
 endif
-
-includes += include_directories('qat_adf')
-deps += ['bus_pci']
-ext_deps += dep
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
deleted file mode 100644 (file)
index 1eef551..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2018 Intel Corporation
- */
-#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
-#define ADF_TRANSPORT_ACCESS_MACROS_H
-
-#include <rte_io.h>
-
-/* CSR write macro */
-#define ADF_CSR_WR(csrAddr, csrOffset, val)            \
-       rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
-
-/* CSR read macro */
-#define ADF_CSR_RD(csrAddr, csrOffset)                 \
-       rte_read32((((uint8_t *)csrAddr) + csrOffset))
-
-#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
-#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
-#define ADF_RING_CSR_RING_CONFIG 0x000
-#define ADF_RING_CSR_RING_LBASE 0x040
-#define ADF_RING_CSR_RING_UBASE 0x080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_SRCSEL_2 0x178
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
-#define ADF_RING_BUNDLE_SIZE 0x1000
-#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
-#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
-#define ADF_COALESCING_MIN_TIME 0x1FF
-#define ADF_COALESCING_MAX_TIME 0xFFFFF
-#define ADF_COALESCING_DEF_TIME 0x27FF
-#define ADF_RING_NEAR_WATERMARK_512 0x08
-#define ADF_RING_NEAR_WATERMARK_0 0x00
-#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
-#define ADF_RING_EMPTY_SIG_BYTE 0x7F
-
-/* Valid internal ring size values */
-#define ADF_RING_SIZE_128 0x01
-#define ADF_RING_SIZE_256 0x02
-#define ADF_RING_SIZE_512 0x03
-#define ADF_RING_SIZE_4K 0x06
-#define ADF_RING_SIZE_16K 0x08
-#define ADF_RING_SIZE_4M 0x10
-#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
-#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
-#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-
-/* Maximum number of qps on a device for any service type */
-#define ADF_MAX_QPS_ON_ANY_SERVICE     2
-#define ADF_RING_DIR_TX                        0
-#define ADF_RING_DIR_RX                        1
-
-/* Valid internal msg size values */
-#define ADF_MSG_SIZE_32 0x01
-#define ADF_MSG_SIZE_64 0x02
-#define ADF_MSG_SIZE_128 0x04
-#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
-#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
-
-/* Size to bytes conversion macros for ring and msg size values */
-#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
-#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
-#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
-#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-
-/* Minimum ring bufer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
-                               ADF_RING_SIZE_4K : SIZE)
-#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
-#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
-                               SIZE) & ~0x4)
-/* Max outstanding requests */
-#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
-       ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
-#define BUILD_RING_CONFIG(size)        \
-       ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
-       | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
-       | size)
-#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
-       ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
-       | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
-       | size)
-#define BUILD_RING_BASE_ADDR(addr, size) \
-       ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-                       ADF_RING_CSR_RING_HEAD + (ring << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-                       ADF_RING_CSR_RING_TAIL + (ring << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-                       ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-               ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
-       uint32_t l_base = 0, u_base = 0; \
-       l_base = (uint32_t)(value & 0xFFFFFFFF); \
-       u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-               ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-               ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
-} while (0)
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-               ADF_RING_CSR_RING_HEAD + (ring << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-               ADF_RING_CSR_RING_TAIL + (ring << 2), value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-do { \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-       ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0);  \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-       ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
-} while (0)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-                       ADF_RING_CSR_INT_COL_EN, value)
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-                       ADF_RING_CSR_INT_COL_CTL, \
-                       ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
-                       ADF_RING_CSR_INT_FLAG_AND_COL, value)
-
-#endif /*ADF_TRANSPORT_ACCESS_MACROS_H */
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
deleted file mode 100644 (file)
index ae39b7f..0000000
+++ /dev/null
@@ -1,273 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2018 Intel Corporation
- */
-#ifndef _ICP_QAT_FW_H_
-#define _ICP_QAT_FW_H_
-#include <sys/types.h>
-#include "icp_qat_hw.h"
-
-#define QAT_FIELD_SET(flags, val, bitpos, mask) \
-{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
-               (((val) & (mask)) << (bitpos))) ; }
-
-#define QAT_FIELD_GET(flags, bitpos, mask) \
-       (((flags) >> (bitpos)) & (mask))
-
-#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
-#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
-#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
-#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
-#define ICP_QAT_FW_NUM_LONGWORDS_1 1
-#define ICP_QAT_FW_NUM_LONGWORDS_2 2
-#define ICP_QAT_FW_NUM_LONGWORDS_3 3
-#define ICP_QAT_FW_NUM_LONGWORDS_4 4
-#define ICP_QAT_FW_NUM_LONGWORDS_5 5
-#define ICP_QAT_FW_NUM_LONGWORDS_6 6
-#define ICP_QAT_FW_NUM_LONGWORDS_7 7
-#define ICP_QAT_FW_NUM_LONGWORDS_10 10
-#define ICP_QAT_FW_NUM_LONGWORDS_13 13
-#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
-
-enum icp_qat_fw_comn_resp_serv_id {
-       ICP_QAT_FW_COMN_RESP_SERV_NULL,
-       ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
-       ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
-};
-
-enum icp_qat_fw_comn_request_id {
-       ICP_QAT_FW_COMN_REQ_NULL = 0,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
-       ICP_QAT_FW_COMN_REQ_DELIMITER
-};
-
-struct icp_qat_fw_comn_req_hdr_cd_pars {
-       union {
-               struct {
-                       uint64_t content_desc_addr;
-                       uint16_t content_desc_resrvd1;
-                       uint8_t content_desc_params_sz;
-                       uint8_t content_desc_hdr_resrvd2;
-                       uint32_t content_desc_resrvd3;
-               } s;
-               struct {
-                       uint32_t serv_specif_fields[4];
-               } s1;
-       } u;
-};
-
-struct icp_qat_fw_comn_req_mid {
-       uint64_t opaque_data;
-       uint64_t src_data_addr;
-       uint64_t dest_data_addr;
-       uint32_t src_length;
-       uint32_t dst_length;
-};
-
-struct icp_qat_fw_comn_req_cd_ctrl {
-       uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
-};
-
-struct icp_qat_fw_comn_req_hdr {
-       uint8_t resrvd1;
-       uint8_t service_cmd_id;
-       uint8_t service_type;
-       uint8_t hdr_flags;
-       uint16_t serv_specif_flags;
-       uint16_t comn_req_flags;
-};
-
-struct icp_qat_fw_comn_req_rqpars {
-       uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
-};
-
-struct icp_qat_fw_comn_req {
-       struct icp_qat_fw_comn_req_hdr comn_hdr;
-       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
-       struct icp_qat_fw_comn_req_mid comn_mid;
-       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
-       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-struct icp_qat_fw_comn_error {
-       uint8_t xlat_err_code;
-       uint8_t cmp_err_code;
-};
-
-struct icp_qat_fw_comn_resp_hdr {
-       uint8_t resrvd1;
-       uint8_t service_id;
-       uint8_t response_type;
-       uint8_t hdr_flags;
-       struct icp_qat_fw_comn_error comn_error;
-       uint8_t comn_status;
-       uint8_t cmd_id;
-};
-
-struct icp_qat_fw_comn_resp {
-       struct icp_qat_fw_comn_resp_hdr comn_hdr;
-       uint64_t opaque_data;
-       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
-#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
-#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
-       icp_qat_fw_comn_req_hdr_t.service_type
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
-       icp_qat_fw_comn_req_hdr_t.service_type = val
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
-       icp_qat_fw_comn_req_hdr_t.service_cmd_id
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
-       icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
-       ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
-       ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
-       QAT_FIELD_GET(hdr_flags, \
-       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
-       (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
-       (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
-        ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
-
-#define QAT_COMN_PTR_TYPE_BITPOS 0
-#define QAT_COMN_PTR_TYPE_MASK 0x1
-#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
-#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
-#define QAT_COMN_PTR_TYPE_FLAT 0x0
-#define QAT_COMN_PTR_TYPE_SGL 0x1
-#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
-#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
-
-#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
-       ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
-        | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
-                       QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
-                       QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
-                       QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
-#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
-#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
-#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
-
-#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
-       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-        & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
-
-#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
-
-#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
-#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
-#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
-#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
-#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
-
-#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
-       ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
-       QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
-       (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
-       QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
-       (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
-       QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
-       (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
-       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
-
-#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
-       QAT_COMN_RESP_CRYPTO_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
-       QAT_COMN_RESP_CMP_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
-       QAT_COMN_RESP_XLAT_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
-       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
-
-#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
-#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
-#define ERR_CODE_NO_ERROR 0
-#define ERR_CODE_INVALID_BLOCK_TYPE -1
-#define ERR_CODE_NO_MATCH_ONES_COMP -2
-#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
-#define ERR_CODE_INCOMPLETE_LEN -4
-#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
-#define ERR_CODE_RPT_GT_SPEC_LEN -6
-#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
-#define ERR_CODE_INV_DIS_CODE_LEN -8
-#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
-#define ERR_CODE_DIS_TOO_FAR_BACK -10
-#define ERR_CODE_OVERFLOW_ERROR -11
-#define ERR_CODE_SOFT_ERROR -12
-#define ERR_CODE_FATAL_ERROR -13
-#define ERR_CODE_SSM_ERROR -14
-#define ERR_CODE_ENDPOINT_ERROR -15
-
-enum icp_qat_fw_slice {
-       ICP_QAT_FW_SLICE_NULL = 0,
-       ICP_QAT_FW_SLICE_CIPHER = 1,
-       ICP_QAT_FW_SLICE_AUTH = 2,
-       ICP_QAT_FW_SLICE_DRAM_RD = 3,
-       ICP_QAT_FW_SLICE_DRAM_WR = 4,
-       ICP_QAT_FW_SLICE_COMP = 5,
-       ICP_QAT_FW_SLICE_XLAT = 6,
-       ICP_QAT_FW_SLICE_DELIMITER
-};
-#endif
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
deleted file mode 100644 (file)
index c33bc3f..0000000
+++ /dev/null
@@ -1,361 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2018 Intel Corporation
- */
-#ifndef _ICP_QAT_FW_LA_H_
-#define _ICP_QAT_FW_LA_H_
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_la_cmd_id {
-       ICP_QAT_FW_LA_CMD_CIPHER = 0,
-       ICP_QAT_FW_LA_CMD_AUTH = 1,
-       ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
-       ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
-       ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
-       ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
-       ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
-       ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
-       ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
-       ICP_QAT_FW_LA_CMD_MGF1 = 9,
-       ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
-       ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
-       ICP_QAT_FW_LA_CMD_DELIMITER = 12
-};
-
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-
-struct icp_qat_fw_la_bulk_req {
-       struct icp_qat_fw_comn_req_hdr comn_hdr;
-       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
-       struct icp_qat_fw_comn_req_mid comn_mid;
-       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
-       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
-#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
-#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
-#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
-#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
-#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
-#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
-#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
-#define ICP_QAT_FW_LA_GCM_PROTO        2
-#define ICP_QAT_FW_LA_CCM_PROTO        1
-#define ICP_QAT_FW_LA_NO_PROTO 0
-#define QAT_LA_PROTO_BITPOS 7
-#define QAT_LA_PROTO_MASK 0x7
-#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
-#define QAT_LA_CMP_AUTH_RES_BITPOS 6
-#define QAT_LA_CMP_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_RET_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
-#define QAT_LA_RET_AUTH_RES_BITPOS 5
-#define QAT_LA_RET_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_UPDATE_STATE 1
-#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
-#define QAT_LA_UPDATE_STATE_BITPOS 4
-#define QAT_LA_UPDATE_STATE_MASK 0x1
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
-#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
-#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
-#define QAT_LA_CIPH_IV_FLD_BITPOS 2
-#define QAT_LA_CIPH_IV_FLD_MASK   0x1
-#define ICP_QAT_FW_LA_PARTIAL_NONE 0
-#define ICP_QAT_FW_LA_PARTIAL_START 1
-#define ICP_QAT_FW_LA_PARTIAL_MID 3
-#define ICP_QAT_FW_LA_PARTIAL_END 2
-#define QAT_LA_PARTIAL_BITPOS 0
-#define QAT_LA_PARTIAL_MASK 0x3
-#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
-       cmp_auth, ret_auth, update_state, \
-       ciph_iv, ciphcfg, partial) \
-       (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
-       ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
-       QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
-       ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
-       QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
-       ((proto & QAT_LA_PROTO_MASK) << \
-       QAT_LA_PROTO_BITPOS)    | \
-       ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
-       QAT_LA_CMP_AUTH_RES_BITPOS) | \
-       ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
-       QAT_LA_RET_AUTH_RES_BITPOS) | \
-       ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
-       QAT_LA_UPDATE_STATE_BITPOS) | \
-       ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
-       QAT_LA_CIPH_IV_FLD_BITPOS) | \
-       ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
-       ((partial & QAT_LA_PARTIAL_MASK) << \
-       QAT_LA_PARTIAL_BITPOS))
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
-       QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
-       QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
-       QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
-       QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
-       QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
-       QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
-       QAT_LA_PARTIAL_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
-       QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
-       QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
-       QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
-       QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
-       QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
-       QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
-       QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
-       QAT_LA_PARTIAL_MASK)
-
-struct icp_qat_fw_cipher_req_hdr_cd_pars {
-       union {
-               struct {
-                       uint64_t content_desc_addr;
-                       uint16_t content_desc_resrvd1;
-                       uint8_t content_desc_params_sz;
-                       uint8_t content_desc_hdr_resrvd2;
-                       uint32_t content_desc_resrvd3;
-               } s;
-               struct {
-                       uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               } s1;
-       } u;
-};
-
-struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
-       union {
-               struct {
-                       uint64_t content_desc_addr;
-                       uint16_t content_desc_resrvd1;
-                       uint8_t content_desc_params_sz;
-                       uint8_t content_desc_hdr_resrvd2;
-                       uint32_t content_desc_resrvd3;
-               } s;
-               struct {
-                       uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               } sl;
-       } u;
-};
-
-struct icp_qat_fw_cipher_cd_ctrl_hdr {
-       uint8_t cipher_state_sz;
-       uint8_t cipher_key_sz;
-       uint8_t cipher_cfg_offset;
-       uint8_t next_curr_id;
-       uint8_t cipher_padding_sz;
-       uint8_t resrvd1;
-       uint16_t resrvd2;
-       uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
-};
-
-struct icp_qat_fw_auth_cd_ctrl_hdr {
-       uint32_t resrvd1;
-       uint8_t resrvd2;
-       uint8_t hash_flags;
-       uint8_t hash_cfg_offset;
-       uint8_t next_curr_id;
-       uint8_t resrvd3;
-       uint8_t outer_prefix_sz;
-       uint8_t final_sz;
-       uint8_t inner_res_sz;
-       uint8_t resrvd4;
-       uint8_t inner_state1_sz;
-       uint8_t inner_state2_offset;
-       uint8_t inner_state2_sz;
-       uint8_t outer_config_offset;
-       uint8_t outer_state1_sz;
-       uint8_t outer_res_sz;
-       uint8_t outer_prefix_offset;
-};
-
-struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
-       uint8_t cipher_state_sz;
-       uint8_t cipher_key_sz;
-       uint8_t cipher_cfg_offset;
-       uint8_t next_curr_id_cipher;
-       uint8_t cipher_padding_sz;
-       uint8_t hash_flags;
-       uint8_t hash_cfg_offset;
-       uint8_t next_curr_id_auth;
-       uint8_t resrvd1;
-       uint8_t outer_prefix_sz;
-       uint8_t final_sz;
-       uint8_t inner_res_sz;
-       uint8_t resrvd2;
-       uint8_t inner_state1_sz;
-       uint8_t inner_state2_offset;
-       uint8_t inner_state2_sz;
-       uint8_t outer_config_offset;
-       uint8_t outer_state1_sz;
-       uint8_t outer_res_sz;
-       uint8_t outer_prefix_offset;
-};
-
-#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
-#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
-#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX  240
-#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
-       (sizeof(struct icp_qat_fw_la_cipher_req_params))
-#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
-
-struct icp_qat_fw_la_cipher_req_params {
-       uint32_t cipher_offset;
-       uint32_t cipher_length;
-       union {
-               uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               struct {
-                       uint64_t cipher_IV_ptr;
-                       uint64_t resrvd1;
-               } s;
-       } u;
-};
-
-struct icp_qat_fw_la_auth_req_params {
-       uint32_t auth_off;
-       uint32_t auth_len;
-       union {
-               uint64_t auth_partial_st_prefix;
-               uint64_t aad_adr;
-       } u1;
-       uint64_t auth_res_addr;
-       union {
-               uint8_t inner_prefix_sz;
-               uint8_t aad_sz;
-       } u2;
-       uint8_t resrvd1;
-       uint8_t hash_state_sz;
-       uint8_t auth_res_sz;
-} __rte_packed;
-
-struct icp_qat_fw_la_auth_req_params_resrvd_flds {
-       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
-       union {
-               uint8_t inner_prefix_sz;
-               uint8_t aad_sz;
-       } u2;
-       uint8_t resrvd1;
-       uint16_t resrvd2;
-};
-
-struct icp_qat_fw_la_resp {
-       struct icp_qat_fw_comn_resp_hdr comn_resp;
-       uint64_t opaque_data;
-       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
-         ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
-       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#endif
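
ICP_QAT_FW_LA_FLAGS_BUILD packs ten independent fields into one flags word; note from the macro body that its third argument, auth_rslt, is shifted into the digest-in-buffer field, so callers effectively pass the digest-in-buffer flag there. A worked example using the bit positions defined above (values copied here so the snippet stands alone), building flags for GCM with a 12-octet IV, a returned auth result, and a 16-byte IV field:

    #include <stdint.h>
    #include <stdio.h>

    /* bit positions copied from the header above */
    #define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
    #define QAT_LA_PROTO_BITPOS 7
    #define QAT_LA_RET_AUTH_RES_BITPOS 5
    #define QAT_LA_CIPH_IV_FLD_BITPOS 2

    int main(void)
    {
            uint16_t flags =
                    (1u << QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | /* IV is 12 octets */
                    (2u << QAT_LA_PROTO_BITPOS) |           /* GCM protocol */
                    (1u << QAT_LA_RET_AUTH_RES_BITPOS) |    /* return the ICV */
                    (1u << QAT_LA_CIPH_IV_FLD_BITPOS);      /* 16-byte IV field */

            /* prints: la flags = 0x0924 */
            printf("la flags = 0x%04x\n", flags);
            return 0;
    }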
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
deleted file mode 100644 (file)
index 56e3cf7..0000000
+++ /dev/null
@@ -1,286 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2018 Intel Corporation
- */
-#ifndef _ICP_QAT_HW_H_
-#define _ICP_QAT_HW_H_
-
-enum icp_qat_hw_ae_id {
-       ICP_QAT_HW_AE_0 = 0,
-       ICP_QAT_HW_AE_1 = 1,
-       ICP_QAT_HW_AE_2 = 2,
-       ICP_QAT_HW_AE_3 = 3,
-       ICP_QAT_HW_AE_4 = 4,
-       ICP_QAT_HW_AE_5 = 5,
-       ICP_QAT_HW_AE_6 = 6,
-       ICP_QAT_HW_AE_7 = 7,
-       ICP_QAT_HW_AE_8 = 8,
-       ICP_QAT_HW_AE_9 = 9,
-       ICP_QAT_HW_AE_10 = 10,
-       ICP_QAT_HW_AE_11 = 11,
-       ICP_QAT_HW_AE_DELIMITER = 12
-};
-
-enum icp_qat_hw_qat_id {
-       ICP_QAT_HW_QAT_0 = 0,
-       ICP_QAT_HW_QAT_1 = 1,
-       ICP_QAT_HW_QAT_2 = 2,
-       ICP_QAT_HW_QAT_3 = 3,
-       ICP_QAT_HW_QAT_4 = 4,
-       ICP_QAT_HW_QAT_5 = 5,
-       ICP_QAT_HW_QAT_DELIMITER = 6
-};
-
-enum icp_qat_hw_auth_algo {
-       ICP_QAT_HW_AUTH_ALGO_NULL = 0,
-       ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
-       ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
-       ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
-       ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
-       ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
-       ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
-       ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
-       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
-       ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
-       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
-       ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
-       ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
-       ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
-       ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
-       ICP_QAT_HW_AUTH_RESERVED_1 = 15,
-       ICP_QAT_HW_AUTH_RESERVED_2 = 16,
-       ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
-       ICP_QAT_HW_AUTH_RESERVED_3 = 18,
-       ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
-       ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
-};
-
-enum icp_qat_hw_auth_mode {
-       ICP_QAT_HW_AUTH_MODE0 = 0,
-       ICP_QAT_HW_AUTH_MODE1 = 1,
-       ICP_QAT_HW_AUTH_MODE2 = 2,
-       ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
-};
-
-struct icp_qat_hw_auth_config {
-       uint32_t config;
-       uint32_t reserved;
-};
-
-#define QAT_AUTH_MODE_BITPOS 4
-#define QAT_AUTH_MODE_MASK 0xF
-#define QAT_AUTH_ALGO_BITPOS 0
-#define QAT_AUTH_ALGO_MASK 0xF
-#define QAT_AUTH_CMP_BITPOS 8
-#define QAT_AUTH_CMP_MASK 0x7F
-#define QAT_AUTH_SHA3_PADDING_BITPOS 16
-#define QAT_AUTH_SHA3_PADDING_MASK 0x1
-#define QAT_AUTH_ALGO_SHA3_BITPOS 22
-#define QAT_AUTH_ALGO_SHA3_MASK 0x3
-#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
-       (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
-       ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
-       (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
-        QAT_AUTH_ALGO_SHA3_BITPOS) | \
-        (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
-       (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
-       & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
-       ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
-
-struct icp_qat_hw_auth_counter {
-       uint32_t counter;
-       uint32_t reserved;
-};
-
-#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
-#define QAT_AUTH_COUNT_BITPOS 0
-#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
-       (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
-
-struct icp_qat_hw_auth_setup {
-       struct icp_qat_hw_auth_config auth_config;
-       struct icp_qat_hw_auth_counter auth_counter;
-};
-
-#define QAT_HW_DEFAULT_ALIGNMENT 8
-#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
-#define ICP_QAT_HW_NULL_STATE1_SZ 32
-#define ICP_QAT_HW_MD5_STATE1_SZ 16
-#define ICP_QAT_HW_SHA1_STATE1_SZ 20
-#define ICP_QAT_HW_SHA224_STATE1_SZ 32
-#define ICP_QAT_HW_SHA256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA384_STATE1_SZ 64
-#define ICP_QAT_HW_SHA512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
-#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
-#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
-#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
-#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
-#define ICP_QAT_HW_NULL_STATE2_SZ 32
-#define ICP_QAT_HW_MD5_STATE2_SZ 16
-#define ICP_QAT_HW_SHA1_STATE2_SZ 20
-#define ICP_QAT_HW_SHA224_STATE2_SZ 32
-#define ICP_QAT_HW_SHA256_STATE2_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
-#define ICP_QAT_HW_SHA384_STATE2_SZ 64
-#define ICP_QAT_HW_SHA512_STATE2_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
-#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
-#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
-#define ICP_QAT_HW_F9_IK_SZ 16
-#define ICP_QAT_HW_F9_FK_SZ 16
-#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
-       ICP_QAT_HW_F9_FK_SZ)
-#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
-#define ICP_QAT_HW_GALOIS_H_SZ 16
-#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
-#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
-
-struct icp_qat_hw_auth_sha512 {
-       struct icp_qat_hw_auth_setup inner_setup;
-       uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
-       struct icp_qat_hw_auth_setup outer_setup;
-       uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
-};
-
-struct icp_qat_hw_auth_algo_blk {
-       struct icp_qat_hw_auth_sha512 sha;
-};
-
-#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
-#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
-
-enum icp_qat_hw_cipher_algo {
-       ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
-       ICP_QAT_HW_CIPHER_ALGO_DES = 1,
-       ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
-       ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
-       ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
-       ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
-       ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
-       ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
-       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
-       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
-       ICP_QAT_HW_CIPHER_DELIMITER = 10
-};
-
-enum icp_qat_hw_cipher_mode {
-       ICP_QAT_HW_CIPHER_ECB_MODE = 0,
-       ICP_QAT_HW_CIPHER_CBC_MODE = 1,
-       ICP_QAT_HW_CIPHER_CTR_MODE = 2,
-       ICP_QAT_HW_CIPHER_F8_MODE = 3,
-       ICP_QAT_HW_CIPHER_XTS_MODE = 6,
-       ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
-};
-
-struct icp_qat_hw_cipher_config {
-       uint32_t val;
-       uint32_t reserved;
-};
-
-enum icp_qat_hw_cipher_dir {
-       ICP_QAT_HW_CIPHER_ENCRYPT = 0,
-       ICP_QAT_HW_CIPHER_DECRYPT = 1,
-};
-
-enum icp_qat_hw_auth_op {
-       ICP_QAT_HW_AUTH_VERIFY = 0,
-       ICP_QAT_HW_AUTH_GENERATE = 1,
-};
-
-enum icp_qat_hw_cipher_convert {
-       ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
-       ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
-};
-
-#define QAT_CIPHER_MODE_BITPOS 4
-#define QAT_CIPHER_MODE_MASK 0xF
-#define QAT_CIPHER_ALGO_BITPOS 0
-#define QAT_CIPHER_ALGO_MASK 0xF
-#define QAT_CIPHER_CONVERT_BITPOS 9
-#define QAT_CIPHER_CONVERT_MASK 0x1
-#define QAT_CIPHER_DIR_BITPOS 8
-#define QAT_CIPHER_DIR_MASK 0x1
-#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
-#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
-#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
-       (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
-       ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
-       ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
-       ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
-#define ICP_QAT_HW_DES_BLK_SZ 8
-#define ICP_QAT_HW_3DES_BLK_SZ 8
-#define ICP_QAT_HW_NULL_BLK_SZ 8
-#define ICP_QAT_HW_AES_BLK_SZ 16
-#define ICP_QAT_HW_KASUMI_BLK_SZ 8
-#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
-#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
-#define ICP_QAT_HW_NULL_KEY_SZ 256
-#define ICP_QAT_HW_DES_KEY_SZ 8
-#define ICP_QAT_HW_3DES_KEY_SZ 24
-#define ICP_QAT_HW_AES_128_KEY_SZ 16
-#define ICP_QAT_HW_AES_192_KEY_SZ 24
-#define ICP_QAT_HW_AES_256_KEY_SZ 32
-#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
-       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
-       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_KASUMI_KEY_SZ 16
-#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_ARC4_KEY_SZ 256
-#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
-#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
-
-#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
-
-/* These defines describe position of the bit-fields
- * in the flags byte in B0
- */
-#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT      6
-#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT          3
-
-#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q)                  \
-       ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
-       | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
-       | ((q) - 1))
-
-#define ICP_QAT_HW_CCM_NQ_CONST 15
-#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
-#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
-#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
-               ICP_QAT_HW_CCM_AAD_LEN_INFO)
-#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
-#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
-#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
-
-struct icp_qat_hw_cipher_algo_blk {
-       struct icp_qat_hw_cipher_config cipher_config;
-       uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
-} __rte_cache_aligned;
-
-#endif
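
The cipher configuration word follows the same packing scheme: mode and algorithm in the low byte, direction and key-convert just above them. A standalone worked example, with the positions and masks copied from the defines above, building the config for AES-128 in CBC mode with key-convert enabled, encrypt direction:

    #include <stdint.h>
    #include <stdio.h>

    /* positions and masks copied from the header above */
    #define QAT_CIPHER_MODE_BITPOS 4
    #define QAT_CIPHER_MODE_MASK 0xF
    #define QAT_CIPHER_ALGO_BITPOS 0
    #define QAT_CIPHER_ALGO_MASK 0xF
    #define QAT_CIPHER_CONVERT_BITPOS 9
    #define QAT_CIPHER_CONVERT_MASK 0x1
    #define QAT_CIPHER_DIR_BITPOS 8
    #define QAT_CIPHER_DIR_MASK 0x1

    #define CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
            ((((mode) & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
            (((algo) & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
            (((convert) & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
            (((dir) & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))

    int main(void)
    {
            /* CBC mode = 1, AES128 = 3, key convert = 1, encrypt = 0 */
            uint32_t cfg = CIPHER_CONFIG_BUILD(1, 3, 1, 0);

            /* prints: cipher config = 0x213 */
            printf("cipher config = 0x%03x\n", cfg);
            return 0;
    }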
diff --git a/drivers/crypto/qat/qat_common.c b/drivers/crypto/qat/qat_common.c
deleted file mode 100644 (file)
index c206d3b..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
- */
-
-#include "qat_common.h"
-#include "qat_device.h"
-#include "qat_logs.h"
-
-int
-qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
-               struct qat_sgl *list, uint32_t data_len)
-{
-       int nr = 1;
-
-       uint32_t buf_len = rte_pktmbuf_iova(buf) -
-                       buf_start + rte_pktmbuf_data_len(buf);
-
-       list->buffers[0].addr = buf_start;
-       list->buffers[0].resrvd = 0;
-       list->buffers[0].len = buf_len;
-
-       if (data_len <= buf_len) {
-               list->num_bufs = nr;
-               list->buffers[0].len = data_len;
-               return 0;
-       }
-
-       buf = buf->next;
-       while (buf) {
-               if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
-                       QAT_LOG(ERR,
-                               "QAT PMD exceeded size of QAT SGL entry(%u)",
-                                       QAT_SGL_MAX_NUMBER);
-                       return -EINVAL;
-               }
-
-               list->buffers[nr].len = rte_pktmbuf_data_len(buf);
-               list->buffers[nr].resrvd = 0;
-               list->buffers[nr].addr = rte_pktmbuf_iova(buf);
-
-               buf_len += list->buffers[nr].len;
-               buf = buf->next;
-
-               if (buf_len > data_len) {
-                       list->buffers[nr].len -=
-                               buf_len - data_len;
-                       buf = NULL;
-               }
-               ++nr;
-       }
-       list->num_bufs = nr;
-
-       return 0;
-}
-
-void qat_stats_get(struct qat_pci_device *dev,
-               struct qat_common_stats *stats,
-               enum qat_service_type service)
-{
-       int i;
-       struct qat_qp **qp;
-
-       if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) {
-               QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d",
-                               stats, dev, service);
-               return;
-       }
-
-       qp = dev->qps_in_use[service];
-       for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
-               if (qp[i] == NULL) {
-                       QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
-                                       service, i);
-                       continue;
-               }
-
-               stats->enqueued_count += qp[i]->stats.enqueued_count;
-               stats->dequeued_count += qp[i]->stats.dequeued_count;
-               stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
-               stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
-       }
-}
-
-void qat_stats_reset(struct qat_pci_device *dev,
-               enum qat_service_type service)
-{
-       int i;
-       struct qat_qp **qp;
-
-       if (dev == NULL || service >= QAT_SERVICE_INVALID) {
-               QAT_LOG(ERR, "invalid param: dev %p, service %d",
-                               dev, service);
-               return;
-       }
-
-       qp = dev->qps_in_use[service];
-       for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
-               if (qp[i] == NULL) {
-                       QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
-                                       service, i);
-                       continue;
-               }
-               memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
-       }
-
-       QAT_LOG(DEBUG, "QAT: %d stats cleared", service);
-}
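
qat_sgl_fill_array above does two things at once: it walks the mbuf chain into the flat SGL array, and it trims the final entry so the entries sum to exactly data_len. A toy, mbuf-free sketch of that walk, with a two-field struct standing in for the chained rte_mbuf:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SEGS 16    /* mirrors QAT_SGL_MAX_NUMBER */

    struct toy_buf {       /* stands in for a chained rte_mbuf */
            uint32_t len;
            struct toy_buf *next;
    };

    /* Fill lens[] so the entries sum to exactly data_len; return count or -1. */
    static int
    toy_sgl_fill(const struct toy_buf *buf, uint32_t data_len, uint32_t *lens)
    {
            uint32_t total = 0;
            int nr = 0;

            while (buf != NULL && total < data_len) {
                    if (nr == MAX_SEGS)
                            return -1;      /* chain longer than the SGL */
                    lens[nr] = buf->len;
                    total += buf->len;
                    if (total > data_len)   /* trim the last entry */
                            lens[nr] -= total - data_len;
                    nr++;
                    buf = buf->next;
            }
            return nr;
    }

    int main(void)
    {
            struct toy_buf b2 = { 512, NULL };
            struct toy_buf b1 = { 1024, &b2 };
            uint32_t lens[MAX_SEGS];
            int i, nr = toy_sgl_fill(&b1, 1200, lens);

            for (i = 0; i < nr; i++)        /* seg 0: 1024, seg 1: 176 */
                    printf("seg %d: %u bytes\n", i, (unsigned int)lens[i]);
            return 0;
    }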
diff --git a/drivers/crypto/qat/qat_common.h b/drivers/crypto/qat/qat_common.h
deleted file mode 100644 (file)
index db85d54..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
- */
-#ifndef _QAT_COMMON_H_
-#define _QAT_COMMON_H_
-
-#include <stdint.h>
-
-#include <rte_mbuf.h>
-
-/**< Intel(R) QAT device name for PCI registration */
-#define QAT_PCI_NAME   qat
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER     16
-
-#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
-
-/* Intel(R) QuickAssist Technology device generation is enumerated
- * from one according to the generation of the device
- */
-enum qat_device_gen {
-       QAT_GEN1 = 1,
-       QAT_GEN2
-};
-
-enum qat_service_type {
-       QAT_SERVICE_ASYMMETRIC = 0,
-       QAT_SERVICE_SYMMETRIC,
-       QAT_SERVICE_COMPRESSION,
-       QAT_SERVICE_INVALID
-};
-#define QAT_MAX_SERVICES               (QAT_SERVICE_INVALID)
-
-/**< Common struct for scatter-gather list operations */
-struct qat_flat_buf {
-       uint32_t len;
-       uint32_t resrvd;
-       uint64_t addr;
-} __rte_packed;
-
-struct qat_sgl {
-       uint64_t resrvd;
-       uint32_t num_bufs;
-       uint32_t num_mapped_bufs;
-       struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-/** Common, i.e. not service-specific, statistics */
-struct qat_common_stats {
-       uint64_t enqueued_count;
-       /**< Count of all operations enqueued */
-       uint64_t dequeued_count;
-       /**< Count of all operations dequeued */
-
-       uint64_t enqueue_err_count;
-       /**< Total error count on operations enqueued */
-       uint64_t dequeue_err_count;
-       /**< Total error count on operations dequeued */
-};
-
-struct qat_pci_device;
-
-int
-qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
-               struct qat_sgl *list, uint32_t data_len);
-void
-qat_stats_get(struct qat_pci_device *dev,
-               struct qat_common_stats *stats,
-               enum qat_service_type service);
-void
-qat_stats_reset(struct qat_pci_device *dev,
-               enum qat_service_type service);
-
-#endif /* _QAT_COMMON_H_ */
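
The firmware reads struct qat_sgl straight out of memory, so its packed layout is part of the ABI: a 16-byte header (resrvd, num_bufs, num_mapped_bufs) followed by 16-byte qat_flat_buf entries. Below is a compile-time check of that layout, using the plain GCC/Clang attribute in place of __rte_packed and deliberately dropping __rte_cache_aligned (which only pads the real struct's sizeof up to a cache-line multiple):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SGL_MAX 16     /* mirrors QAT_SGL_MAX_NUMBER */

    struct flat_buf {
            uint32_t len;
            uint32_t resrvd;
            uint64_t addr;
    } __attribute__((packed));

    struct sgl {
            uint64_t resrvd;
            uint32_t num_bufs;
            uint32_t num_mapped_bufs;
            struct flat_buf buffers[SGL_MAX];
    } __attribute__((packed));

    static_assert(sizeof(struct flat_buf) == 16, "entry must be 16 bytes");
    static_assert(offsetof(struct sgl, buffers) == 16, "header must be 16 bytes");
    static_assert(sizeof(struct sgl) == 16 + 16 * SGL_MAX, "no hidden padding");

    int main(void)
    {
            return 0;
    }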
diff --git a/drivers/crypto/qat/qat_device.c b/drivers/crypto/qat/qat_device.c
deleted file mode 100644 (file)
index 64f236e..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
- */
-
-#include "qat_device.h"
-#include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
-
-/* Hardware device information per generation */
-__extension__
-struct qat_gen_hw_data qat_gen_config[] =  {
-       [QAT_GEN1] = {
-               .dev_gen = QAT_GEN1,
-               .qp_hw_data = qat_gen1_qps,
-       },
-       [QAT_GEN2] = {
-               .dev_gen = QAT_GEN2,
-               .qp_hw_data = qat_gen1_qps,
-               /* gen2 has same ring layout as gen1 */
-       },
-};
-
-
-static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES];
-static int qat_nb_pci_devices;
-
-/*
- * The set of PCI devices this driver supports
- */
-
-static const struct rte_pci_id pci_id_qat_map[] = {
-               {
-                       RTE_PCI_DEVICE(0x8086, 0x0443),
-               },
-               {
-                       RTE_PCI_DEVICE(0x8086, 0x37c9),
-               },
-               {
-                       RTE_PCI_DEVICE(0x8086, 0x19e3),
-               },
-               {
-                       RTE_PCI_DEVICE(0x8086, 0x6f55),
-               },
-               {.device_id = 0},
-};
-
-
-static struct qat_pci_device *
-qat_pci_get_dev(uint8_t dev_id)
-{
-       return &qat_pci_devices[dev_id];
-}
-
-static struct qat_pci_device *
-qat_pci_get_named_dev(const char *name)
-{
-       struct qat_pci_device *dev;
-       unsigned int i;
-
-       if (name == NULL)
-               return NULL;
-
-       for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
-               dev = &qat_pci_devices[i];
-
-               if ((dev->attached == QAT_ATTACHED) &&
-                               (strcmp(dev->name, name) == 0))
-                       return dev;
-       }
-
-       return NULL;
-}
-
-static uint8_t
-qat_pci_find_free_device_index(void)
-{
-       uint8_t dev_id;
-
-       for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) {
-               if (qat_pci_devices[dev_id].attached == QAT_DETACHED)
-                       break;
-       }
-       return dev_id;
-}
-
-struct qat_pci_device *
-qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev)
-{
-       char name[QAT_DEV_NAME_MAX_LEN];
-
-       rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
-
-       return qat_pci_get_named_dev(name);
-}
-
-struct qat_pci_device *
-qat_pci_device_allocate(struct rte_pci_device *pci_dev)
-{
-       struct qat_pci_device *qat_dev;
-       uint8_t qat_dev_id;
-       char name[QAT_DEV_NAME_MAX_LEN];
-
-       rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
-       snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
-       if (qat_pci_get_named_dev(name) != NULL) {
-               QAT_LOG(ERR, "QAT device with name %s already allocated!",
-                               name);
-               return NULL;
-       }
-
-       qat_dev_id = qat_pci_find_free_device_index();
-       if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) {
-               QAT_LOG(ERR, "Reached maximum number of QAT devices");
-               return NULL;
-       }
-
-       qat_dev = qat_pci_get_dev(qat_dev_id);
-       memset(qat_dev, 0, sizeof(*qat_dev));
-       snprintf(qat_dev->name, QAT_DEV_NAME_MAX_LEN, "%s", name);
-       qat_dev->qat_dev_id = qat_dev_id;
-       qat_dev->pci_dev = pci_dev;
-       switch (qat_dev->pci_dev->id.device_id) {
-       case 0x0443:
-               qat_dev->qat_dev_gen = QAT_GEN1;
-               break;
-       case 0x37c9:
-       case 0x19e3:
-       case 0x6f55:
-               qat_dev->qat_dev_gen = QAT_GEN2;
-               break;
-       default:
-               QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
-               return NULL;
-       }
-
-       rte_spinlock_init(&qat_dev->arb_csr_lock);
-
-       qat_dev->attached = QAT_ATTACHED;
-
-       qat_nb_pci_devices++;
-
-       QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d",
-                       qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
-
-       return qat_dev;
-}
-
-int
-qat_pci_device_release(struct rte_pci_device *pci_dev)
-{
-       struct qat_pci_device *qat_dev;
-       char name[QAT_DEV_NAME_MAX_LEN];
-
-       if (pci_dev == NULL)
-               return -EINVAL;
-
-       rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
-       snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
-       qat_dev = qat_pci_get_named_dev(name);
-       if (qat_dev != NULL) {
-
-               /* Check that there are no service devs still on pci device */
-               if (qat_dev->sym_dev != NULL)
-                       return -EBUSY;
-
-               qat_dev->attached = QAT_DETACHED;
-               qat_nb_pci_devices--;
-       }
-       QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
-                               name, qat_nb_pci_devices);
-       return 0;
-}
-
-static int
-qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev,
-               struct rte_pci_device *pci_dev)
-{
-       qat_sym_dev_destroy(qat_pci_dev);
-       qat_comp_dev_destroy(qat_pci_dev);
-       qat_asym_dev_destroy(qat_pci_dev);
-       return qat_pci_device_release(pci_dev);
-}
-
-static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-               struct rte_pci_device *pci_dev)
-{
-       int ret = 0;
-       struct qat_pci_device *qat_pci_dev;
-
-       QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
-                       pci_dev->addr.bus,
-                       pci_dev->addr.devid,
-                       pci_dev->addr.function);
-
-       qat_pci_dev = qat_pci_device_allocate(pci_dev);
-       if (qat_pci_dev == NULL)
-               return -ENODEV;
-
-       ret = qat_sym_dev_create(qat_pci_dev);
-       if (ret != 0)
-               goto error_out;
-
-       ret = qat_comp_dev_create(qat_pci_dev);
-       if (ret != 0)
-               goto error_out;
-
-       ret = qat_asym_dev_create(qat_pci_dev);
-       if (ret != 0)
-               goto error_out;
-
-       return 0;
-
-error_out:
-       qat_pci_dev_destroy(qat_pci_dev, pci_dev);
-       return ret;
-
-}
-
-static int qat_pci_remove(struct rte_pci_device *pci_dev)
-{
-       struct qat_pci_device *qat_pci_dev;
-
-       if (pci_dev == NULL)
-               return -EINVAL;
-
-       qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev);
-       if (qat_pci_dev == NULL)
-               return 0;
-
-       return qat_pci_dev_destroy(qat_pci_dev, pci_dev);
-}
-
-static struct rte_pci_driver rte_qat_pmd = {
-       .id_table = pci_id_qat_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
-       .probe = qat_pci_probe,
-       .remove = qat_pci_remove
-};
-
-__attribute__((weak)) int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
-{
-       return 0;
-}
-
-__attribute__((weak)) int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
-{
-       return 0;
-}
-
-__attribute__((weak)) int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
-{
-       return 0;
-}
-
-__attribute__((weak)) int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
-{
-       return 0;
-}
-
-__attribute__((weak)) int
-qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
-{
-       return 0;
-}
-
-__attribute__((weak)) int
-qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
-{
-       return 0;
-}
-
-RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
-RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
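
The weak definitions at the end of qat_device.c are what let this common file build and link no matter which services are compiled in: each service that is present supplies a strong qat_*_dev_create/destroy, and the linker drops the matching stub. A minimal sketch of the pattern (service_create is a hypothetical name, not a QAT symbol):

    #include <stdio.h>

    /* Weak fallback: used only if no other object file defines this symbol. */
    __attribute__((weak)) int
    service_create(void)
    {
            return 0;       /* "nothing to do" success, like the QAT stubs */
    }

    int main(void)
    {
            /*
             * Link another .c that defines a non-weak service_create() and
             * the call below silently switches to the real implementation.
             */
            printf("service_create() = %d\n", service_create());
            return 0;
    }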
diff --git a/drivers/crypto/qat/qat_device.h b/drivers/crypto/qat/qat_device.h
deleted file mode 100644 (file)
index 0cb370c..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
- */
-#ifndef _QAT_DEVICE_H_
-#define _QAT_DEVICE_H_
-
-#include <rte_bus_pci.h>
-
-#include "qat_common.h"
-#include "qat_logs.h"
-#include "adf_transport_access_macros.h"
-#include "qat_qp.h"
-
-#define QAT_DETACHED  (0)
-#define QAT_ATTACHED  (1)
-
-#define QAT_DEV_NAME_MAX_LEN   64
-
-/*
- * This struct holds all the data about a QAT pci device
- * including data about all services it supports.
- * It contains
- *  - hw_data
- *  - config data
- *  - runtime data
- */
-struct qat_sym_dev_private;
-struct qat_pci_device {
-
-       /* Data used by all services */
-       char name[QAT_DEV_NAME_MAX_LEN];
-       /**< Name of qat pci device */
-       uint8_t qat_dev_id;
-       /**< Device instance for this qat pci device */
-       struct rte_pci_device *pci_dev;
-       /**< PCI information. */
-       enum qat_device_gen qat_dev_gen;
-       /**< QAT device generation */
-       rte_spinlock_t arb_csr_lock;
-       /**< lock to protect accesses to the arbiter CSR */
-       __extension__
-       uint8_t attached : 1;
-       /**< Flag indicating the device is attached */
-
-       struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE];
-       /**< links to qps set up for each service, index same as on API */
-
-       /* Data relating to symmetric crypto service */
-       struct qat_sym_dev_private *sym_dev;
-       /**< link back to cryptodev private data */
-       struct rte_device sym_rte_dev;
-       /**< This represents the crypto subset of this pci device.
-        * Register with this rather than with the one in
-        * pci_dev so that its driver can have a crypto-specific name
-        */
-
-       /* Data relating to compression service */
-
-       /* Data relating to asymmetric crypto service */
-
-};
-
-struct qat_gen_hw_data {
-       enum qat_device_gen dev_gen;
-       const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE];
-};
-
-extern struct qat_gen_hw_data qat_gen_config[];
-
-struct qat_pci_device *
-qat_pci_device_allocate(struct rte_pci_device *pci_dev);
-
-int
-qat_pci_device_release(struct rte_pci_device *pci_dev);
-
-struct qat_pci_device *
-qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev);
-
-/* declaration needed for weak functions */
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
-
-int
-qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
-
-int
-qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
-
-#endif /* _QAT_DEVICE_H_ */
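
The trickiest declaration in this header is qp_hw_data: a pointer to an array of ADF_MAX_QPS_ON_ANY_SERVICE elements, which lets qat_gen_config[gen].qp_hw_data[service][qp_id] index a two-dimensional per-generation table without copying it. A reduced sketch of that type (toy sizes and struct; only the tx ring numbers mirror the gen1 table):

    #include <stdio.h>

    #define N_SERVICES 3
    #define N_QPS 2

    struct hw { int tx_ring; };

    static const struct hw gen1[N_SERVICES][N_QPS] = {
            { { .tx_ring = 0 }, { .tx_ring = 1 } },   /* asymmetric  */
            { { .tx_ring = 2 }, { .tx_ring = 3 } },   /* symmetric   */
            { { .tx_ring = 6 }, { .tx_ring = 7 } },   /* compression */
    };

    struct gen_cfg {
            /* pointer to an array of N_QPS elements: one row per service */
            const struct hw (*qp_hw_data)[N_QPS];
    };

    int main(void)
    {
            struct gen_cfg cfg = { .qp_hw_data = gen1 };

            /* service 1 (symmetric), qp 0 -> tx ring 2 */
            printf("tx ring = %d\n", cfg.qp_hw_data[1][0].tx_ring);
            return 0;
    }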
diff --git a/drivers/crypto/qat/qat_logs.c b/drivers/crypto/qat/qat_logs.c
deleted file mode 100644 (file)
index fa3df85..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
- */
-
-#include <rte_log.h>
-#include <rte_hexdump.h>
-
-#include "qat_logs.h"
-
-int qat_gen_logtype;
-int qat_dp_logtype;
-
-int
-qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
-               const void *buf, unsigned int len)
-{
-       if (level > rte_log_get_global_level())
-               return 0;
-       if (level > (uint32_t)(rte_log_get_level(logtype)))
-               return 0;
-
-       rte_hexdump(rte_logs.file == NULL ? stderr : rte_logs.file,
-                               title, buf, len);
-       return 0;
-}
-
-RTE_INIT(qat_pci_init_log);
-static void
-qat_pci_init_log(void)
-{
-       /* Non-data-path logging for pci device and all services */
-       qat_gen_logtype = rte_log_register("pmd.qat_general");
-       if (qat_gen_logtype >= 0)
-               rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE);
-
-       /* data-path logging for all services */
-       qat_dp_logtype = rte_log_register("pmd.qat_dp");
-       if (qat_dp_logtype >= 0)
-               rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE);
-}
diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h
deleted file mode 100644 (file)
index 4baea12..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_LOGS_H_
-#define _QAT_LOGS_H_
-
-extern int qat_gen_logtype;
-extern int qat_dp_logtype;
-
-#define QAT_LOG(level, fmt, args...)                   \
-       rte_log(RTE_LOG_ ## level, qat_gen_logtype,             \
-                       "%s(): " fmt "\n", __func__, ## args)
-
-#define QAT_DP_LOG(level, fmt, args...)                        \
-       rte_log(RTE_LOG_ ## level, qat_dp_logtype,              \
-                       "%s(): " fmt "\n", __func__, ## args)
-
-#define QAT_DP_HEXDUMP_LOG(level, title, buf, len)             \
-       qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len)
-
-/**
- * qat_hexdump_log - Dump out memory in a special hex dump format.
- *
- * Dump out the message buffer in a special hex dump output format with
- * characters printed for each line of 16 hex values. The message will be sent
- * to the stream defined by rte_logs.file or to stderr in case of rte_logs.file
- * is undefined.
- */
-int
-qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
-               const void *buf, unsigned int len);
-
-#endif /* _QAT_LOGS_H_ */
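
Both macros prepend the calling function and append the newline, so call sites pass bare format strings; the hexdump path additionally has to clear two gates, the global level and the pmd.qat_dp logtype level, before the buffer is touched. A standalone sketch of that double gate, with plain integers standing in for the rte_log level calls (in DPDK's numbering higher values are more verbose, e.g. NOTICE is 6 and DEBUG is 8):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t global_level = 8;   /* everything up to DEBUG */
    static uint32_t logtype_level = 6;  /* this logtype capped at NOTICE */

    static int
    hexdump_log(uint32_t level, const char *title,
                const uint8_t *buf, unsigned int len)
    {
            unsigned int i;

            if (level > global_level || level > logtype_level)
                    return 0;           /* filtered: buffer never touched */
            printf("%s:", title);
            for (i = 0; i < len; i++)
                    printf(" %02x", buf[i]);
            printf("\n");
            return 0;
    }

    int main(void)
    {
            const uint8_t req[4] = { 0xde, 0xad, 0xbe, 0xef };

            hexdump_log(6, "notice dump", req, 4);  /* printed */
            hexdump_log(8, "debug dump", req, 4);   /* filtered out */
            return 0;
    }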
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
deleted file mode 100644 (file)
index 32c1759..0000000
+++ /dev/null
@@ -1,635 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_memzone.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-#include <rte_atomic.h>
-#include <rte_prefetch.h>
-
-#include "qat_logs.h"
-#include "qat_device.h"
-#include "qat_qp.h"
-#include "qat_sym.h"
-#include "adf_transport_access_macros.h"
-
-
-#define ADF_MAX_DESC                           4096
-#define ADF_MIN_DESC                           128
-
-#define ADF_ARB_REG_SLOT                       0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET            0x19C
-
-#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
-       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
-       (ADF_ARB_REG_SLOT * index), value)
-
-__extension__
-const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
-                                        [ADF_MAX_QPS_ON_ANY_SERVICE] = {
-       /* queue pairs which provide an asymmetric crypto service */
-       [QAT_SERVICE_ASYMMETRIC] = {
-               {
-                       .service_type = QAT_SERVICE_ASYMMETRIC,
-                       .hw_bundle_num = 0,
-                       .tx_ring_num = 0,
-                       .rx_ring_num = 8,
-                       .tx_msg_size = 64,
-                       .rx_msg_size = 32,
-
-               }, {
-                       .service_type = QAT_SERVICE_ASYMMETRIC,
-                       .hw_bundle_num = 0,
-                       .tx_ring_num = 1,
-                       .rx_ring_num = 9,
-                       .tx_msg_size = 64,
-                       .rx_msg_size = 32,
-               }
-       },
-       /* queue pairs which provide a symmetric crypto service */
-       [QAT_SERVICE_SYMMETRIC] = {
-               {
-                       .service_type = QAT_SERVICE_SYMMETRIC,
-                       .hw_bundle_num = 0,
-                       .tx_ring_num = 2,
-                       .rx_ring_num = 10,
-                       .tx_msg_size = 128,
-                       .rx_msg_size = 32,
-               },
-               {
-                       .service_type = QAT_SERVICE_SYMMETRIC,
-                       .hw_bundle_num = 0,
-                       .tx_ring_num = 3,
-                       .rx_ring_num = 11,
-                       .tx_msg_size = 128,
-                       .rx_msg_size = 32,
-               }
-       },
-       /* queue pairs which provide a compression service */
-       [QAT_SERVICE_COMPRESSION] = {
-               {
-                       .service_type = QAT_SERVICE_COMPRESSION,
-                       .hw_bundle_num = 0,
-                       .tx_ring_num = 6,
-                       .rx_ring_num = 14,
-                       .tx_msg_size = 128,
-                       .rx_msg_size = 32,
-               }, {
-                       .service_type = QAT_SERVICE_COMPRESSION,
-                       .hw_bundle_num = 0,
-                       .tx_ring_num = 7,
-                       .rx_ring_num = 15,
-                       .tx_msg_size = 128,
-                       .rx_msg_size = 32,
-               }
-       }
-};
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
-       uint32_t queue_size_bytes);
-static void qat_queue_delete(struct qat_queue *queue);
-static int qat_queue_create(struct qat_pci_device *qat_dev,
-       struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
-       uint32_t *queue_size_for_csr);
-static void adf_configure_queues(struct qat_qp *queue);
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
-       rte_spinlock_t *lock);
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
-       rte_spinlock_t *lock);
-
-
-int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
-               enum qat_service_type service)
-{
-       int i, count;
-
-       for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
-               if (qp_hw_data[i].service_type == service)
-                       count++;
-       return count;
-}
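
qat_qps_per_service() is a plain linear count over one fixed-size row of the table above, so with the gen1 layout every service reports two queue pairs. A reduced, self-contained version of that count (toy table and service ids, shaped like qat_gen1_qps):

    #include <stdio.h>

    #define N_SVCS 3
    #define N_QPS 2

    struct hw { int service_type; };

    /* toy copy of the gen1 shape: each service owns both slots of its row */
    static const struct hw qps[N_SVCS][N_QPS] = {
            { { 0 }, { 0 } },       /* asymmetric  */
            { { 1 }, { 1 } },       /* symmetric   */
            { { 2 }, { 2 } },       /* compression */
    };

    static int
    qps_per_service(const struct hw *row, int service)
    {
            int i, count = 0;

            for (i = 0; i < N_QPS; i++)
                    if (row[i].service_type == service)
                            count++;
            return count;
    }

    int main(void)
    {
            int s;

            for (s = 0; s < N_SVCS; s++)    /* prints 2 for each service */
                    printf("service %d: %d qps\n", s,
                           qps_per_service(qps[s], s));
            return 0;
    }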
-
-static const struct rte_memzone *
-queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
-                       int socket_id)
-{
-       const struct rte_memzone *mz;
-
-       mz = rte_memzone_lookup(queue_name);
-       if (mz != 0) {
-               if (((size_t)queue_size <= mz->len) &&
-                               ((socket_id == SOCKET_ID_ANY) ||
-                                       (socket_id == mz->socket_id))) {
-                       QAT_LOG(DEBUG, "re-use memzone already "
-                                       "allocated for %s", queue_name);
-                       return mz;
-               }
-
-               QAT_LOG(ERR, "Incompatible memzone already "
-                               "allocated %s, size %u, socket %d. "
-                               "Requested size %u, socket %u",
-                               queue_name, (uint32_t)mz->len,
-                               mz->socket_id, queue_size, socket_id);
-               return NULL;
-       }
-
-       QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
-                                       queue_name, queue_size, socket_id);
-       return rte_memzone_reserve_aligned(queue_name, queue_size,
-               socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
-}
-
-int qat_qp_setup(struct qat_pci_device *qat_dev,
-               struct qat_qp **qp_addr,
-               uint16_t queue_pair_id,
-               struct qat_qp_config *qat_qp_conf)
-
-{
-       struct qat_qp *qp;
-       struct rte_pci_device *pci_dev = qat_dev->pci_dev;
-       char op_cookie_pool_name[RTE_RING_NAMESIZE];
-       uint32_t i;
-
-       QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
-               queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);
-
-       if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
-               (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
-               QAT_LOG(ERR, "Can't create qp for %u descriptors",
-                               qat_qp_conf->nb_descriptors);
-               return -EINVAL;
-       }
-
-       if (pci_dev->mem_resource[0].addr == NULL) {
-               QAT_LOG(ERR, "Could not find VF config space "
-                               "(UIO driver attached?).");
-               return -EINVAL;
-       }
-
-       /* Allocate the queue pair data structure. */
-       qp = rte_zmalloc("qat PMD qp metadata",
-                       sizeof(*qp), RTE_CACHE_LINE_SIZE);
-       if (qp == NULL) {
-               QAT_LOG(ERR, "Failed to alloc mem for qp struct");
-               return -ENOMEM;
-       }
-       qp->nb_descriptors = qat_qp_conf->nb_descriptors;
-       qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
-                       qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
-                       RTE_CACHE_LINE_SIZE);
-       if (qp->op_cookies == NULL) {
-               QAT_LOG(ERR, "Failed to alloc mem for cookie");
-               rte_free(qp);
-               return -ENOMEM;
-       }
-
-       qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
-       qp->inflights16 = 0;
-
-       if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
-                                       ADF_RING_DIR_TX) != 0) {
-               QAT_LOG(ERR, "Tx queue create failed "
-                               "queue_pair_id=%u", queue_pair_id);
-               goto create_err;
-       }
-
-       if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
-                                       ADF_RING_DIR_RX) != 0) {
-               QAT_LOG(ERR, "Rx queue create failed "
-                               "queue_pair_id=%hu", queue_pair_id);
-               qat_queue_delete(&(qp->tx_q));
-               goto create_err;
-       }
-
-       adf_configure_queues(qp);
-       adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
-                                       &qat_dev->arb_csr_lock);
-
-       snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
-                                       "%s%d_cookies_%s_qp%hu",
-               pci_dev->driver->driver.name, qat_dev->qat_dev_id,
-               qat_qp_conf->service_str, queue_pair_id);
-
-       QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
-       qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
-       if (qp->op_cookie_pool == NULL)
-               qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
-                               qp->nb_descriptors,
-                               qat_qp_conf->cookie_size, 64, 0,
-                               NULL, NULL, NULL, NULL, qat_qp_conf->socket_id,
-                               0);
-       if (!qp->op_cookie_pool) {
-               QAT_LOG(ERR, "QAT PMD Cannot create"
-                               " op mempool");
-               goto create_err;
-       }
-
-       for (i = 0; i < qp->nb_descriptors; i++) {
-               if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
-                       QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
-                       goto create_err;
-               }
-       }
-
-       qp->qat_dev_gen = qat_dev->qat_dev_gen;
-       qp->build_request = qat_qp_conf->build_request;
-       qp->service_type = qat_qp_conf->hw->service_type;
-       qp->qat_dev = qat_dev;
-
-       QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
-                       queue_pair_id, op_cookie_pool_name);
-
-       *qp_addr = qp;
-       return 0;
-
-create_err:
-       if (qp->op_cookie_pool)
-               rte_mempool_free(qp->op_cookie_pool);
-       rte_free(qp->op_cookies);
-       rte_free(qp);
-       return -EFAULT;
-}
-
-int qat_qp_release(struct qat_qp **qp_addr)
-{
-       struct qat_qp *qp = *qp_addr;
-       uint32_t i;
-
-       if (qp == NULL) {
-               QAT_LOG(DEBUG, "qp already freed");
-               return 0;
-       }
-
-       QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
-                               qp->qat_dev->qat_dev_id);
-
-       /* Don't free memory if there are still responses to be processed */
-       if (qp->inflights16 == 0) {
-               qat_queue_delete(&(qp->tx_q));
-               qat_queue_delete(&(qp->rx_q));
-       } else {
-               return -EAGAIN;
-       }
-
-       adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
-                                       &qp->qat_dev->arb_csr_lock);
-
-       for (i = 0; i < qp->nb_descriptors; i++)
-               rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
-
-       if (qp->op_cookie_pool)
-               rte_mempool_free(qp->op_cookie_pool);
-
-       rte_free(qp->op_cookies);
-       rte_free(qp);
-       *qp_addr = NULL;
-       return 0;
-}
-
-
-static void qat_queue_delete(struct qat_queue *queue)
-{
-       const struct rte_memzone *mz;
-       int status = 0;
-
-       if (queue == NULL) {
-               QAT_LOG(DEBUG, "Invalid queue");
-               return;
-       }
-       QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
-                       queue->hw_queue_number, queue->memz_name);
-
-       mz = rte_memzone_lookup(queue->memz_name);
-       if (mz != NULL) {
-               /* Write an unused pattern to the queue memory. */
-               memset(queue->base_addr, 0x7F, queue->queue_size);
-               status = rte_memzone_free(mz);
-               if (status != 0)
-                       QAT_LOG(ERR, "Error %d on freeing queue %s",
-                                       status, queue->memz_name);
-       } else {
-               QAT_LOG(DEBUG, "queue %s doesn't exist",
-                               queue->memz_name);
-       }
-}
-
-static int
-qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
-               struct qat_qp_config *qp_conf, uint8_t dir)
-{
-       uint64_t queue_base;
-       void *io_addr;
-       const struct rte_memzone *qp_mz;
-       struct rte_pci_device *pci_dev = qat_dev->pci_dev;
-       int ret = 0;
-       uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
-                       qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
-       uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);
-
-       queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
-       queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
-                       qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);
-
-       if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
-               QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
-               return -EINVAL;
-       }
-
-       /*
-        * Allocate a memzone for the queue - create a unique name.
-        */
-       snprintf(queue->memz_name, sizeof(queue->memz_name),
-                       "%s_%d_%s_%s_%d_%d",
-               pci_dev->driver->driver.name, qat_dev->qat_dev_id,
-               qp_conf->service_str, "qp_mem",
-               queue->hw_bundle_number, queue->hw_queue_number);
-       qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
-                       qp_conf->socket_id);
-       if (qp_mz == NULL) {
-               QAT_LOG(ERR, "Failed to allocate ring memzone");
-               return -ENOMEM;
-       }
-
-       queue->base_addr = (char *)qp_mz->addr;
-       queue->base_phys_addr = qp_mz->iova;
-       if (qat_qp_check_queue_alignment(queue->base_phys_addr,
-                       queue_size_bytes)) {
-               QAT_LOG(ERR, "Invalid alignment on queue create "
-                                       " 0x%"PRIx64"\n",
-                                       queue->base_phys_addr);
-               ret = -EFAULT;
-               goto queue_create_err;
-       }
-
-       if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
-                       &(queue->queue_size)) != 0) {
-               QAT_LOG(ERR, "Invalid num inflights");
-               ret = -EINVAL;
-               goto queue_create_err;
-       }
-
-       queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
-                                       ADF_BYTES_TO_MSG_SIZE(desc_size));
-       queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
-
-       if (queue->max_inflights < 2) {
-               QAT_LOG(ERR, "Invalid num inflights");
-               ret = -EINVAL;
-               goto queue_create_err;
-       }
-       queue->head = 0;
-       queue->tail = 0;
-       queue->msg_size = desc_size;
-
-       /*
-        * Write an unused pattern to the queue memory.
-        */
-       memset(queue->base_addr, 0x7F, queue_size_bytes);
-
-       queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
-                                       queue->queue_size);
-
-       io_addr = pci_dev->mem_resource[0].addr;
-
-       WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
-                       queue->hw_queue_number, queue_base);
-
-       QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
-               " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
-                       queue->memz_name,
-                       queue->queue_size, queue_size_bytes,
-                       qp_conf->nb_descriptors, desc_size,
-                       queue->max_inflights, queue->modulo_mask);
-
-       return 0;
-
-queue_create_err:
-       rte_memzone_free(qp_mz);
-       return ret;
-}
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
-                                       uint32_t queue_size_bytes)
-{
-       if (((queue_size_bytes - 1) & phys_addr) != 0)
-               return -EINVAL;
-       return 0;
-}
-
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
-       uint32_t *p_queue_size_for_csr)
-{
-       uint8_t i = ADF_MIN_RING_SIZE;
-
-       for (; i <= ADF_MAX_RING_SIZE; i++)
-               if ((msg_size * msg_num) ==
-                               (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
-                       *p_queue_size_for_csr = i;
-                       return 0;
-               }
-       QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
-       return -EINVAL;
-}
-
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
-                                       rte_spinlock_t *lock)
-{
-       uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
-                                       (ADF_ARB_REG_SLOT *
-                                                       txq->hw_bundle_number);
-       uint32_t value;
-
-       rte_spinlock_lock(lock);
-       value = ADF_CSR_RD(base_addr, arb_csr_offset);
-       value |= (0x01 << txq->hw_queue_number);
-       ADF_CSR_WR(base_addr, arb_csr_offset, value);
-       rte_spinlock_unlock(lock);
-}
-
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
-                                       rte_spinlock_t *lock)
-{
-       uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
-                                       (ADF_ARB_REG_SLOT *
-                                                       txq->hw_bundle_number);
-       uint32_t value;
-
-       rte_spinlock_lock(lock);
-       value = ADF_CSR_RD(base_addr, arb_csr_offset);
-       value &= ~(0x01 << txq->hw_queue_number);
-       ADF_CSR_WR(base_addr, arb_csr_offset, value);
-       rte_spinlock_unlock(lock);
-}
-
-static void adf_configure_queues(struct qat_qp *qp)
-{
-       uint32_t queue_config;
-       struct qat_queue *queue = &qp->tx_q;
-
-       queue_config = BUILD_RING_CONFIG(queue->queue_size);
-
-       WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
-                       queue->hw_queue_number, queue_config);
-
-       queue = &qp->rx_q;
-       queue_config =
-                       BUILD_RESP_RING_CONFIG(queue->queue_size,
-                                       ADF_RING_NEAR_WATERMARK_512,
-                                       ADF_RING_NEAR_WATERMARK_0);
-
-       WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
-                       queue->hw_queue_number, queue_config);
-}
-
-static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
-{
-       return data & modulo_mask;
-}
-
-static inline void
-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
-       WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
-                       q->hw_queue_number, q->tail);
-       q->nb_pending_requests = 0;
-       q->csr_tail = q->tail;
-}
-
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
-{
-       uint32_t old_head, new_head;
-       uint32_t max_head;
-
-       old_head = q->csr_head;
-       new_head = q->head;
-       max_head = qp->nb_descriptors * q->msg_size;
-
-       /* write out free descriptors */
-       void *cur_desc = (uint8_t *)q->base_addr + old_head;
-
-       if (new_head < old_head) {
-               memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
-               memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
-       } else {
-               memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
-       }
-       q->nb_processed_responses = 0;
-       q->csr_head = new_head;
-
-       /* write current head to CSR */
-       WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
-                           q->hw_queue_number, new_head);
-}
-
-uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
-{
-       register struct qat_queue *queue;
-       struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-       register uint32_t nb_ops_sent = 0;
-       register int ret;
-       uint16_t nb_ops_possible = nb_ops;
-       register uint8_t *base_addr;
-       register uint32_t tail;
-       int overflow;
-
-       if (unlikely(nb_ops == 0))
-               return 0;
-
-       /* read params used a lot in main loop into registers */
-       queue = &(tmp_qp->tx_q);
-       base_addr = (uint8_t *)queue->base_addr;
-       tail = queue->tail;
-
-       /* Find how many can actually fit on the ring */
-       tmp_qp->inflights16 += nb_ops;
-       overflow = tmp_qp->inflights16 - queue->max_inflights;
-       if (overflow > 0) {
-               tmp_qp->inflights16 -= overflow;
-               nb_ops_possible = nb_ops - overflow;
-               if (nb_ops_possible == 0)
-                       return 0;
-       }
-
-       while (nb_ops_sent != nb_ops_possible) {
-               ret = tmp_qp->build_request(*ops, base_addr + tail,
-                               tmp_qp->op_cookies[tail / queue->msg_size],
-                               tmp_qp->qat_dev_gen);
-               if (ret != 0) {
-                       tmp_qp->stats.enqueue_err_count++;
-                       /*
-                        * This message cannot be enqueued,
-                        * so remove the unsent ops from the in-flight count
-                        */
-                       tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
-                       if (nb_ops_sent == 0)
-                               return 0;
-                       goto kick_tail;
-               }
-
-               tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
-               ops++;
-               nb_ops_sent++;
-       }
-kick_tail:
-       queue->tail = tail;
-       tmp_qp->stats.enqueued_count += nb_ops_sent;
-       queue->nb_pending_requests += nb_ops_sent;
-       if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
-                   queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
-               txq_write_tail(tmp_qp, queue);
-       }
-       return nb_ops_sent;
-}
-
-uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
-{
-       struct qat_queue *rx_queue, *tx_queue;
-       struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-       uint32_t head;
-       uint32_t resp_counter = 0;
-       uint8_t *resp_msg;
-
-       rx_queue = &(tmp_qp->rx_q);
-       tx_queue = &(tmp_qp->tx_q);
-       head = rx_queue->head;
-       resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
-
-       while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
-                       resp_counter != nb_ops) {
-
-               if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-                       qat_sym_process_response(ops, resp_msg);
-               /* add qat_asym_process_response here */
-               /* add qat_comp_process_response here */
-
-               head = adf_modulo(head + rx_queue->msg_size,
-                                 rx_queue->modulo_mask);
-
-               resp_msg = (uint8_t *)rx_queue->base_addr + head;
-               ops++;
-               resp_counter++;
-       }
-       if (resp_counter > 0) {
-               rx_queue->head = head;
-               tmp_qp->stats.dequeued_count += resp_counter;
-               rx_queue->nb_processed_responses += resp_counter;
-               tmp_qp->inflights16 -= resp_counter;
-
-               if (rx_queue->nb_processed_responses >
-                                               QAT_CSR_HEAD_WRITE_THRESH)
-                       rxq_free_desc(tmp_qp, rx_queue);
-       }
-       /* also check if tail needs to be advanced */
-       if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
-               tx_queue->tail != tx_queue->csr_tail) {
-               txq_write_tail(tmp_qp, tx_queue);
-       }
-       return resp_counter;
-}
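
The file removed above is carried over verbatim to drivers/common/qat/qat_qp.c. Its hot path avoids one MMIO write per operation by keeping shadow copies of the ring head and tail and flushing them to the CSRs only after enough work has accumulated (QAT_CSR_TAIL_WRITE_THRESH, QAT_CSR_HEAD_WRITE_THRESH), while adf_modulo() wraps byte offsets with a power-of-two mask. A minimal standalone sketch of that arithmetic follows; the sizes and names are illustrative, not the driver's real layout:

    /* Sketch of the shadow-copy ring arithmetic; sizes are examples only. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSG_SIZE    64U               /* bytes per descriptor */
    #define RING_BYTES  4096U             /* ring size, power of two */
    #define MODULO_MASK (RING_BYTES - 1)  /* like queue->modulo_mask */
    #define TAIL_THRESH 32U               /* pending ops before CSR write */

    struct ring_shadow {
            uint32_t tail;       /* shadow copy of the HW tail */
            uint32_t csr_tail;   /* last value written to the CSR */
            uint16_t nb_pending; /* requests since last CSR write */
    };

    /* Advance by one descriptor; the AND is a modulo because
     * RING_BYTES is a power of two, as in adf_modulo(). */
    static uint32_t ring_advance(uint32_t tail)
    {
            return (tail + MSG_SIZE) & MODULO_MASK;
    }

    int main(void)
    {
            struct ring_shadow r = { 0, 0, 0 };
            int i;

            for (i = 0; i < 100; i++) {
                    r.tail = ring_advance(r.tail);
                    if (++r.nb_pending > TAIL_THRESH) {
                            /* stand-in for WRITE_CSR_RING_TAIL() */
                            r.csr_tail = r.tail;
                            r.nb_pending = 0;
                    }
            }
            printf("tail=%" PRIu32 " csr_tail=%" PRIu32 " pending=%u\n",
                   r.tail, r.csr_tail, r.nb_pending);
            return 0;
    }

The real driver additionally force-writes the tail whenever the in-flight count drops below QAT_CSR_TAIL_FORCE_WRITE_THRESH, so coalescing never adds latency on a nearly empty ring.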
diff --git a/drivers/crypto/qat/qat_qp.h b/drivers/crypto/qat/qat_qp.h
deleted file mode 100644 (file)
index 59db945..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
- */
-#ifndef _QAT_QP_H_
-#define _QAT_QP_H_
-
-#include "qat_common.h"
-#include "adf_transport_access_macros.h"
-
-struct qat_pci_device;
-
-#define QAT_CSR_HEAD_WRITE_THRESH 32U
-/* number of responses to process before writing head CSR */
-#define QAT_CSR_TAIL_WRITE_THRESH 32U
-/* number of requests to accumulate before writing tail CSR */
-#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
-/* number of inflights below which no tail write coalescing should occur */
-
-typedef int (*build_request_t)(void *op,
-               uint8_t *req, void *op_cookie,
-               enum qat_device_gen qat_dev_gen);
-/**< Build a request from an op. */
-
-/**
- * Structure with the hardware ring configuration of a queue pair.
- */
-struct qat_qp_hw_data {
-       enum qat_service_type service_type;
-       uint8_t hw_bundle_num;
-       uint8_t tx_ring_num;
-       uint8_t rx_ring_num;
-       uint16_t tx_msg_size;
-       uint16_t rx_msg_size;
-};
-/**
- * Structure with data needed for creation of queue pair.
- */
-struct qat_qp_config {
-       const struct qat_qp_hw_data *hw;
-       uint32_t nb_descriptors;
-       uint32_t cookie_size;
-       int socket_id;
-       build_request_t build_request;
-       const char *service_str;
-};
-
-/**
- * Structure associated with each queue.
- */
-struct qat_queue {
-       char            memz_name[RTE_MEMZONE_NAMESIZE];
-       void            *base_addr;             /* Base address */
-       rte_iova_t      base_phys_addr;         /* Queue physical address */
-       uint32_t        head;                   /* Shadow copy of the head */
-       uint32_t        tail;                   /* Shadow copy of the tail */
-       uint32_t        modulo_mask;
-       uint32_t        msg_size;
-       uint16_t        max_inflights;
-       uint32_t        queue_size;
-       uint8_t         hw_bundle_number;
-       uint8_t         hw_queue_number;
-       /* HW queue aka ring offset on bundle */
-       uint32_t        csr_head;               /* last written head value */
-       uint32_t        csr_tail;               /* last written tail value */
-       uint16_t        nb_processed_responses;
-       /* number of responses processed since last CSR head write */
-       uint16_t        nb_pending_requests;
-       /* number of requests pending since last CSR tail write */
-};
-
-struct qat_qp {
-       void                    *mmap_bar_addr;
-       uint16_t                inflights16;
-       struct qat_queue        tx_q;
-       struct qat_queue        rx_q;
-       struct qat_common_stats stats;
-       struct rte_mempool *op_cookie_pool;
-       void **op_cookies;
-       uint32_t nb_descriptors;
-       enum qat_device_gen qat_dev_gen;
-       build_request_t build_request;
-       enum qat_service_type service_type;
-       struct qat_pci_device *qat_dev;
-       /**< qat device this qp is on */
-} __rte_cache_aligned;
-
-extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
-
-uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
-
-uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
-
-int
-qat_qp_release(struct qat_qp **qp_addr);
-
-int
-qat_qp_setup(struct qat_pci_device *qat_dev,
-               struct qat_qp **qp_addr, uint16_t queue_pair_id,
-               struct qat_qp_config *qat_qp_conf);
-
-int
-qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
-                       enum qat_service_type service);
-#endif /* _QAT_QP_H_ */
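
drivers/crypto/qat/qat_qp.h is likewise deleted in favour of the common copy. The header is what keeps qat_qp.c service-agnostic: a service passes qat_qp_setup() a qat_qp_config whose build_request callback converts the service's own op format into a firmware request, and the dequeue loop dispatches responses on service_type. A hedged sketch of the wiring; my_build_request() and the descriptor/cookie sizes below are placeholders, not the sym PMD's actual values:

    /* Placeholder callback; a real service fills the firmware request here. */
    static int my_build_request(void *op, uint8_t *req, void *op_cookie,
                    enum qat_device_gen qat_dev_gen);

    static int
    setup_service_qp(struct qat_pci_device *qat_dev, struct qat_qp **qp,
                    uint16_t qp_id, int socket_id)
    {
            struct qat_qp_config cfg = {
                    .hw = &qat_gen1_qps[QAT_SERVICE_SYMMETRIC][qp_id],
                    .nb_descriptors = 128,  /* example ring depth */
                    .cookie_size = 64,      /* example per-op cookie size */
                    .socket_id = socket_id,
                    .build_request = my_build_request,
                    .service_str = "sym",
            };

            return qat_qp_setup(qat_dev, qp, qp_id, &cfg);
    }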
index 126c191..d425892 100644 (file)
@@ -6,6 +6,8 @@
 #define _QAT_SYM_H_
 
 #include <rte_cryptodev_pmd.h>
+
+#ifdef BUILD_QAT_SYM
 #include <openssl/evp.h>
 
 #include "qat_common.h"
@@ -152,5 +154,11 @@ qat_sym_process_response(void **op, uint8_t *resp)
        }
        *op = (void *)rx_op;
 }
+#else
 
+static inline void
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+}
+#endif
 #endif /* _QAT_SYM_H_ */
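
The BUILD_QAT_SYM guard added here is what lets qat_qp.c live under drivers/common: its dequeue loop calls qat_sym_process_response() unconditionally, so when the symmetric service is not compiled in, the header substitutes an empty inline stub and the common code still compiles and links. The same pattern in isolation (FEATURE_X and feature_x_hook() are illustrative names):

    #ifdef FEATURE_X
    void feature_x_hook(void *ctx);              /* real implementation elsewhere */
    #else
    static inline void feature_x_hook(void *ctx) /* compiled-out stub */
    {
            (void)ctx;                           /* silence unused-parameter warning */
    }
    #endif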
index 1e2344c..439aeb9 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef _QAT_SYM_PMD_H_
 #define _QAT_SYM_PMD_H_
 
+#ifdef BUILD_QAT_SYM
+
 #include <rte_cryptodev.h>
 
 #include "qat_sym_capabilities.h"
@@ -36,4 +38,5 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev);
 int
 qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
 
+#endif
 #endif /* _QAT_SYM_PMD_H_ */
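
Note the contrast with qat_sym.h above: qat_sym_pmd.h can hide its whole body behind BUILD_QAT_SYM with no stubs, because the common code never references qat_sym_dev_create()/qat_sym_dev_destroy() when the symmetric PMD is disabled. BUILD_QAT_SYM itself is presumably defined by the qat build files touched elsewhere in this patch when the symmetric PMD is enabled.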