--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include "qat_device.h"
+#include "adf_transport_access_macros.h"
+#include "qat_dev_gens.h"
+
+#include <stdint.h>
+
+#define ADF_ARB_REG_SLOT 0x1000
+
+int
+qat_reset_ring_pairs_gen1(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	/*
+	 * Ring pair reset is not supported on base generations, nothing to do
+	 */
+ return 0;
+}
+
+const struct rte_mem_resource *
+qat_dev_get_transport_bar_gen1(struct rte_pci_device *pci_dev)
+{
+ return &pci_dev->mem_resource[0];
+}
+
+int
+qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource __rte_unused,
+ struct rte_pci_device *pci_dev __rte_unused)
+{
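+	/* Base generations do not expose a misc BAR */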
+ return -1;
+}
+
+int
+qat_dev_read_config_gen1(struct qat_pci_device *qat_dev __rte_unused)
+{
+	/*
+	 * Base generations do not have a device configuration to read,
+	 * but implement this op anyway so that a NULL pointer on higher
+	 * generations can be treated as a fault
+	 */
+ return 0;
+}
+
+int
+qat_dev_get_extra_size_gen1(void)
+{
+ return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen1 = {
+ .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
+ .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
+ .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen1,
+ .qat_dev_read_config = qat_dev_read_config_gen1,
+ .qat_dev_get_extra_size = qat_dev_get_extra_size_gen1,
+};
+
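+/* Register the GEN1 hardware-specific ops at constructor time */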
+RTE_INIT(qat_dev_gen_gen1_init)
+{
+ qat_dev_hw_spec[QAT_GEN1] = &qat_dev_hw_spec_gen1;
+ qat_gen_config[QAT_GEN1].dev_gen = QAT_GEN1;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include "qat_device.h"
+#include "adf_transport_access_macros.h"
+#include "qat_dev_gens.h"
+
+#include <stdint.h>
+
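+/* GEN2 reuses the GEN1 device ops; only the registration differs */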
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen2 = {
+ .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
+ .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
+ .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen1,
+ .qat_dev_read_config = qat_dev_read_config_gen1,
+ .qat_dev_get_extra_size = qat_dev_get_extra_size_gen1,
+};
+
+RTE_INIT(qat_dev_gen_gen2_init)
+{
+ qat_dev_hw_spec[QAT_GEN2] = &qat_dev_hw_spec_gen2;
+ qat_gen_config[QAT_GEN2].dev_gen = QAT_GEN2;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include "qat_device.h"
+#include "adf_transport_access_macros.h"
+#include "qat_dev_gens.h"
+
+#include <stdint.h>
+
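+/* GEN3 also reuses the GEN1 device ops */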
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen3 = {
+ .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
+ .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
+ .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen1,
+ .qat_dev_read_config = qat_dev_read_config_gen1,
+ .qat_dev_get_extra_size = qat_dev_get_extra_size_gen1,
+};
+
+RTE_INIT(qat_dev_gen_gen3_init)
+{
+ qat_dev_hw_spec[QAT_GEN3] = &qat_dev_hw_spec_gen3;
+ qat_gen_config[QAT_GEN3].dev_gen = QAT_GEN3;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <rte_dev.h>
+#include <rte_pci.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen4vf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+#include "qat_dev_gens.h"
+
+#include <stdint.h>
+
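+/* Per-device private data for GEN4: queue pair hw config per bundle */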
+struct qat_dev_gen4_extra {
+ struct qat_qp_hw_data qp_gen4_data[QAT_GEN4_BUNDLE_NUM]
+ [QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
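+/* PF2VF register offsets and message field layout for GEN4 VFs */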
+static struct qat_pf2vf_dev qat_pf2vf_gen4 = {
+ .pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+ .vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+ .pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+ .pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+ .pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+ .pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
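+/*
+ * Query the PF for the ring-pair to service mapping
+ * (GET_RING_TO_SVC block message); the result is written to 'val'.
+ */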
+int
+qat_query_svc_gen4(struct qat_pci_device *qat_dev, uint8_t *val)
+{
+ struct qat_pf2vf_msg pf2vf_msg;
+
+ pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ;
+ pf2vf_msg.block_hdr = ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ;
+ pf2vf_msg.msg_data = 2;
+ return qat_pf2vf_exch_msg(qat_dev, pf2vf_msg, 2, val);
+}
+
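+/* Map the hardware-reported service value to a PMD service type */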
+static enum qat_service_type
+gen4_pick_service(uint8_t hw_service)
+{
+ switch (hw_service) {
+ case QAT_SVC_SYM:
+ return QAT_SERVICE_SYMMETRIC;
+ case QAT_SVC_COMPRESSION:
+ return QAT_SERVICE_COMPRESSION;
+ case QAT_SVC_ASYM:
+ return QAT_SERVICE_ASYMMETRIC;
+ default:
+ return QAT_SERVICE_INVALID;
+ }
+}
+
+static int
+qat_dev_read_config_gen4(struct qat_pci_device *qat_dev)
+{
+ int i = 0;
+ uint16_t svc = 0;
+ struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
+ struct qat_qp_hw_data *hw_data;
+ enum qat_service_type service_type;
+ uint8_t hw_service;
+
+ if (qat_query_svc_gen4(qat_dev, (uint8_t *)&svc))
+ return -EFAULT;
+ for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
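+		/* Each bundle's service is encoded in 3 bits of the response */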
+ hw_service = (svc >> (3 * i)) & 0x7;
+ service_type = gen4_pick_service(hw_service);
+ if (service_type == QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR,
+ "Unrecognized service on bundle %d",
+ i);
+ return -ENOTSUP;
+ }
+ hw_data = &dev_extra->qp_gen4_data[i][0];
+ memset(hw_data, 0, sizeof(*hw_data));
+ hw_data->service_type = service_type;
+ if (service_type == QAT_SERVICE_ASYMMETRIC) {
+ hw_data->tx_msg_size = 64;
+ hw_data->rx_msg_size = 32;
+		} else if (service_type == QAT_SERVICE_SYMMETRIC ||
+				service_type == QAT_SERVICE_COMPRESSION) {
+ hw_data->tx_msg_size = 128;
+ hw_data->rx_msg_size = 32;
+ }
+ hw_data->tx_ring_num = 0;
+ hw_data->rx_ring_num = 1;
+ hw_data->hw_bundle_num = i;
+ }
+ return 0;
+}
+
+static int
+qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
+{
+ int ret = 0, i;
+ uint8_t data[4];
+ struct qat_pf2vf_msg pf2vf_msg;
+
+ pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_RP_RESET;
+ pf2vf_msg.block_hdr = -1;
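+	/* Request a reset of each ring pair (bundle) individually */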
+ for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++) {
+ pf2vf_msg.msg_data = i;
+ ret = qat_pf2vf_exch_msg(qat_pci_dev, pf2vf_msg, 1, data);
+ if (ret) {
+			QAT_LOG(ERR, "QAT error when resetting bundle no %d",
+ i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct rte_mem_resource *
+qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
+{
+ return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen4(struct rte_mem_resource **mem_resource,
+ struct rte_pci_device *pci_dev)
+{
+ *mem_resource = &pci_dev->mem_resource[2];
+ return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen4(void)
+{
+ return sizeof(struct qat_dev_gen4_extra);
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen4 = {
+ .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen4,
+ .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen4,
+ .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen4,
+ .qat_dev_read_config = qat_dev_read_config_gen4,
+ .qat_dev_get_extra_size = qat_dev_get_extra_size_gen4,
+};
+
+RTE_INIT(qat_dev_gen_4_init)
+{
+ qat_dev_hw_spec[QAT_GEN4] = &qat_dev_hw_spec_gen4;
+ qat_gen_config[QAT_GEN4].dev_gen = QAT_GEN4;
+ qat_gen_config[QAT_GEN4].pf2vf_dev = &qat_pf2vf_gen4;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef _QAT_DEV_GENS_H_
+#define _QAT_DEV_GENS_H_
+
+#include "qat_device.h"
+#include "qat_qp.h"
+
+#include <stdint.h>
+
+extern const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+ [ADF_MAX_QPS_ON_ANY_SERVICE];
+
+int
+qat_dev_get_extra_size_gen1(void);
+
+int
+qat_reset_ring_pairs_gen1(struct qat_pci_device *qat_pci_dev);
+const struct rte_mem_resource *
+qat_dev_get_transport_bar_gen1(struct rte_pci_device *pci_dev);
+int
+qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource,
+ struct rte_pci_device *pci_dev);
+int
+qat_dev_read_config_gen1(struct qat_pci_device *qat_dev);
+
+int
+qat_query_svc_gen4(struct qat_pci_device *qat_dev, uint8_t *val);
+
+#endif
'qat_device.c',
'qat_logs.c',
'qat_pf2vf.c',
+ 'dev/qat_dev_gen1.c',
+ 'dev/qat_dev_gen2.c',
+ 'dev/qat_dev_gen3.c',
+ 'dev/qat_dev_gen4.c',
)
includes += include_directories(
'qat_adf',
struct qat_gen_hw_data qat_gen_config[QAT_N_GENS];
struct qat_dev_hw_spec_funcs *qat_dev_hw_spec[QAT_N_GENS];
-/* pv2vf data Gen 4*/
-struct qat_pf2vf_dev qat_pf2vf_gen4 = {
- .pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
- .vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
- .pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
- .pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
- .pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
- .pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
-};
-
-/* Hardware device information per generation */
-__extension__
-struct qat_gen_hw_data qat_gen_config[] = {
- [QAT_GEN1] = {
- .dev_gen = QAT_GEN1,
- .qp_hw_data = qat_gen1_qps,
- .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN1
- },
- [QAT_GEN2] = {
- .dev_gen = QAT_GEN2,
- .qp_hw_data = qat_gen1_qps,
- /* gen2 has same ring layout as gen1 */
- .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN2
- },
- [QAT_GEN3] = {
- .dev_gen = QAT_GEN3,
- .qp_hw_data = qat_gen3_qps,
- .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN3
- },
- [QAT_GEN4] = {
- .dev_gen = QAT_GEN4,
- .qp_hw_data = NULL,
- .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN3,
- .pf2vf_dev = &qat_pf2vf_gen4
- },
-};
-
/* per-process array of device data */
struct qat_device_info qat_pci_devs[RTE_PMD_QAT_MAX_PCI_DEVICES];
static int qat_nb_pci_devices;
{.device_id = 0},
};
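+/* Per-generation private data size appended to struct qat_pci_device */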
+static int
+qat_pci_get_extra_size(enum qat_device_gen qat_dev_gen)
+{
+ struct qat_dev_hw_spec_funcs *ops_hw =
+ qat_dev_hw_spec[qat_dev_gen];
+ RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_get_extra_size,
+ -ENOTSUP);
+ return ops_hw->qat_dev_get_extra_size();
+}
+
static struct qat_pci_device *
qat_pci_get_named_dev(const char *name)
{
return qat_pci_get_named_dev(name);
}
-static int
-qat_gen4_reset_ring_pair(struct qat_pci_device *qat_pci_dev)
-{
- int ret = 0, i;
- uint8_t data[4];
- struct qat_pf2vf_msg pf2vf_msg;
-
- pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_RP_RESET;
- pf2vf_msg.block_hdr = -1;
- for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++) {
- pf2vf_msg.msg_data = i;
- ret = qat_pf2vf_exch_msg(qat_pci_dev, pf2vf_msg, 1, data);
- if (ret) {
- QAT_LOG(ERR, "QAT error when reset bundle no %d",
- i);
- return ret;
- }
- }
-
- return 0;
-}
-
-int qat_query_svc(struct qat_pci_device *qat_dev, uint8_t *val)
-{
- int ret = -(EINVAL);
- struct qat_pf2vf_msg pf2vf_msg;
-
- if (qat_dev->qat_dev_gen == QAT_GEN4) {
- pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ;
- pf2vf_msg.block_hdr = ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ;
- pf2vf_msg.msg_data = 2;
- ret = qat_pf2vf_exch_msg(qat_dev, pf2vf_msg, 2, val);
- }
-
- return ret;
-}
-
-
-static void qat_dev_parse_cmd(const char *str, struct qat_dev_cmd_param
+static void
+qat_dev_parse_cmd(const char *str, struct qat_dev_cmd_param
*qat_dev_cmd_param)
{
int i = 0;
struct qat_dev_cmd_param *qat_dev_cmd_param)
{
struct qat_pci_device *qat_dev;
+ enum qat_device_gen qat_dev_gen;
uint8_t qat_dev_id = 0;
char name[QAT_DEV_NAME_MAX_LEN];
struct rte_devargs *devargs = pci_dev->device.devargs;
+ struct qat_dev_hw_spec_funcs *ops_hw;
+ struct rte_mem_resource *mem_resource;
+ const struct rte_memzone *qat_dev_mz;
+ int qat_dev_size, extra_size;
rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
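+	/* Map the PCI device ID to the QAT hardware generation */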
+ switch (pci_dev->id.device_id) {
+ case 0x0443:
+ qat_dev_gen = QAT_GEN1;
+ break;
+ case 0x37c9:
+ case 0x19e3:
+ case 0x6f55:
+ case 0x18ef:
+ qat_dev_gen = QAT_GEN2;
+ break;
+ case 0x18a1:
+ qat_dev_gen = QAT_GEN3;
+ break;
+ case 0x4941:
+ qat_dev_gen = QAT_GEN4;
+ break;
+ default:
+ QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
+ return NULL;
+ }
+
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
const struct rte_memzone *mz = rte_memzone_lookup(name);
return NULL;
}
- qat_pci_devs[qat_dev_id].mz = rte_memzone_reserve(name,
- sizeof(struct qat_pci_device),
+ extra_size = qat_pci_get_extra_size(qat_dev_gen);
+ if (extra_size < 0) {
+		QAT_LOG(ERR, "QAT internal error: no extra size op for gen %d",
+ qat_dev_gen);
+ return NULL;
+ }
+
+ qat_dev_size = sizeof(struct qat_pci_device) + extra_size;
+ qat_dev_mz = rte_memzone_reserve(name, qat_dev_size,
rte_socket_id(), 0);
- if (qat_pci_devs[qat_dev_id].mz == NULL) {
+ if (qat_dev_mz == NULL) {
QAT_LOG(ERR, "Error when allocating memzone for QAT_%d",
qat_dev_id);
return NULL;
}
- qat_dev = qat_pci_devs[qat_dev_id].mz->addr;
- memset(qat_dev, 0, sizeof(*qat_dev));
+ qat_dev = qat_dev_mz->addr;
+ memset(qat_dev, 0, qat_dev_size);
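+	/* Per-generation private data follows the base structure */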
+ qat_dev->dev_private = qat_dev + 1;
strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
qat_dev->qat_dev_id = qat_dev_id;
qat_pci_devs[qat_dev_id].pci_dev = pci_dev;
- switch (pci_dev->id.device_id) {
- case 0x0443:
- qat_dev->qat_dev_gen = QAT_GEN1;
- break;
- case 0x37c9:
- case 0x19e3:
- case 0x6f55:
- case 0x18ef:
- qat_dev->qat_dev_gen = QAT_GEN2;
- break;
- case 0x18a1:
- qat_dev->qat_dev_gen = QAT_GEN3;
- break;
- case 0x4941:
- qat_dev->qat_dev_gen = QAT_GEN4;
- break;
- default:
- QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
- rte_memzone_free(qat_pci_devs[qat_dev->qat_dev_id].mz);
+ qat_dev->qat_dev_gen = qat_dev_gen;
+
+ ops_hw = qat_dev_hw_spec[qat_dev->qat_dev_gen];
+ if (ops_hw->qat_dev_get_misc_bar == NULL) {
+ QAT_LOG(ERR, "qat_dev_get_misc_bar function pointer not set");
+ rte_memzone_free(qat_dev_mz);
return NULL;
}
-
- if (qat_dev->qat_dev_gen == QAT_GEN4) {
- qat_dev->misc_bar_io_addr = pci_dev->mem_resource[2].addr;
- if (qat_dev->misc_bar_io_addr == NULL) {
+ if (ops_hw->qat_dev_get_misc_bar(&mem_resource, pci_dev) == 0) {
+ if (mem_resource->addr == NULL) {
QAT_LOG(ERR, "QAT cannot get access to VF misc bar");
+ rte_memzone_free(qat_dev_mz);
return NULL;
}
- }
+ qat_dev->misc_bar_io_addr = mem_resource->addr;
+	} else {
+		qat_dev->misc_bar_io_addr = NULL;
+	}
if (devargs && devargs->drv_str)
qat_dev_parse_cmd(devargs->drv_str, qat_dev_cmd_param);
- if (qat_dev->qat_dev_gen >= QAT_GEN4) {
- if (qat_read_qp_config(qat_dev)) {
- QAT_LOG(ERR,
- "Cannot acquire ring configuration for QAT_%d",
- qat_dev_id);
- return NULL;
- }
+ if (qat_read_qp_config(qat_dev)) {
+ QAT_LOG(ERR,
+ "Cannot acquire ring configuration for QAT_%d",
+ qat_dev_id);
+ rte_memzone_free(qat_dev_mz);
+ return NULL;
}
+	/* No errors during allocation or setup, attach the memzone
+	 * holding qat_dev to the per-process device array
+	 */
+ qat_pci_devs[qat_dev_id].mz = qat_dev_mz;
+
rte_spinlock_init(&qat_dev->arb_csr_lock);
qat_nb_pci_devices++;
int sym_ret = 0, asym_ret = 0, comp_ret = 0;
int num_pmds_created = 0;
struct qat_pci_device *qat_pci_dev;
+ struct qat_dev_hw_spec_funcs *ops_hw;
struct qat_dev_cmd_param qat_dev_cmd_param[] = {
{ SYM_ENQ_THRESHOLD_NAME, 0 },
{ ASYM_ENQ_THRESHOLD_NAME, 0 },
if (qat_pci_dev == NULL)
return -ENODEV;
- if (qat_pci_dev->qat_dev_gen == QAT_GEN4) {
- if (qat_gen4_reset_ring_pair(qat_pci_dev)) {
- QAT_LOG(ERR,
- "Cannot reset ring pairs, does pf driver supports pf2vf comms?"
- );
- return -ENODEV;
- }
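+	/* Reset ring pairs via the per-generation op before creating PMDs */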
+ ops_hw = qat_dev_hw_spec[qat_pci_dev->qat_dev_gen];
+ RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_reset_ring_pairs,
+ -ENOTSUP);
+ if (ops_hw->qat_dev_reset_ring_pairs(qat_pci_dev)) {
+ QAT_LOG(ERR,
+			"Cannot reset ring pairs, does the PF driver support pf2vf comms?"
+ );
+ return -ENODEV;
}
sym_ret = qat_sym_dev_create(qat_pci_dev, qat_dev_cmd_param);
return 0;
}
-static int qat_pci_remove(struct rte_pci_device *pci_dev)
+static int
+qat_pci_remove(struct rte_pci_device *pci_dev)
{
struct qat_pci_device *qat_pci_dev;
/**< Data of ring configuration on gen4 */
void *misc_bar_io_addr;
/**< Address of misc bar */
+ void *dev_private;
+	/**< Per-generation private data */
};
struct qat_gen_hw_data {
int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
-int
-qat_query_svc(struct qat_pci_device *qat_pci_dev, uint8_t *ret);
-
#endif /* _QAT_DEVICE_H_ */
#include "qat_comp.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_access_macros_gen4vf.h"
+#include "dev/qat_dev_gens.h"
#define QAT_CQ_MAX_DEQ_RETRIES 10
if (qat_dev_gen == QAT_GEN4) {
uint16_t svc = 0;
- if (qat_query_svc(qat_dev, (uint8_t *)&svc))
+ if (qat_query_svc_gen4(qat_dev, (uint8_t *)&svc))
return -(EFAULT);
for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
struct qat_qp_hw_data *hw_data =