.socket_id = qat_dev_instance->pci_dev->device.numa_node,
};
char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
struct rte_compressdev *compressdev;
struct qat_comp_dev_private *comp_dev;
+ const struct rte_compressdev_capabilities *capabilities;
+ uint64_t capa_size;
snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
qat_pci_dev->name, "comp");
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ snprintf(capa_memz_name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+ "QAT_COMP_CAPA_GEN_%d",
+ qat_pci_dev->qat_dev_gen);
+
comp_dev = compressdev->data->dev_private;
comp_dev->qat_dev = qat_pci_dev;
comp_dev->compressdev = compressdev;
case QAT_GEN1:
case QAT_GEN2:
case QAT_GEN3:
- comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ capabilities = qat_comp_gen_capabilities;
+ capa_size = sizeof(qat_comp_gen_capabilities);
break;
default:
+ capabilities = qat_comp_gen_capabilities;
+ capa_size = sizeof(qat_comp_gen_capabilities);
QAT_LOG(DEBUG,
"QAT gen %d capabilities unknown, default to GEN1",
qat_pci_dev->qat_dev_gen);
break;
}
+	/*
+	 * The capability memzone is keyed by QAT generation only, so it is
+	 * shared by all devices of that generation (and by secondary
+	 * processes). Look it up first and only reserve if it does not
+	 * exist yet — a bare reserve would fail for the second device of
+	 * the same generation. Mirrors the asym/sym PMD create paths.
+	 */
+	comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (comp_dev->capa_mz == NULL) {
+		comp_dev->capa_mz = rte_memzone_reserve(capa_memz_name,
+			capa_size,
+			rte_socket_id(), 0);
+	}
+	if (comp_dev->capa_mz == NULL) {
+		QAT_LOG(DEBUG,
+			"Error allocating memzone for capabilities, destroying PMD for %s",
+			name);
+		memset(&qat_dev_instance->comp_rte_dev, 0,
+			sizeof(qat_dev_instance->comp_rte_dev));
+		rte_compressdev_pmd_destroy(compressdev);
+		return -EFAULT;
+	}
+
+	memcpy(comp_dev->capa_mz->addr, capabilities, capa_size);
+	comp_dev->qat_dev_capabilities = comp_dev->capa_mz->addr;
+
while (1) {
if (qat_dev_cmd_param[i].name == NULL)
break;
if (comp_dev == NULL)
return 0;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(qat_pci_dev->comp_dev->capa_mz);
+
/* clean up any resources used by the device */
qat_comp_dev_close(comp_dev->compressdev);
/**< The device's pool for qat_comp_xforms */
struct rte_mempool *streampool;
/**< The device's pool for qat_comp_streams */
+ const struct rte_memzone *capa_mz;
+ /* Shared memzone for storing capabilities */
uint16_t min_enq_burst_threshold;
};
.private_data_size = sizeof(struct qat_asym_dev_private)
};
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct rte_cryptodev *cryptodev;
struct qat_asym_dev_private *internals;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "QAT_ASYM_CAPA_GEN_%d",
+ qat_pci_dev->qat_dev_gen);
+
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
internals->asym_dev_id = cryptodev->data->dev_id;
internals->qat_dev_capabilities = qat_gen1_asym_capabilities;
+ internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (internals->capa_mz == NULL) {
+ internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+ sizeof(qat_gen1_asym_capabilities),
+ rte_socket_id(), 0);
+ }
+ if (internals->capa_mz == NULL) {
+ QAT_LOG(DEBUG,
+ "Error allocating memzone for capabilities, destroying PMD for %s",
+ name);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&qat_dev_instance->asym_rte_dev, 0,
+ sizeof(qat_dev_instance->asym_rte_dev));
+ return -EFAULT;
+ }
+
+ memcpy(internals->capa_mz->addr, qat_gen1_asym_capabilities,
+ sizeof(qat_gen1_asym_capabilities));
+ internals->qat_dev_capabilities = internals->capa_mz->addr;
+
while (1) {
if (qat_dev_cmd_param[i].name == NULL)
break;
return -ENODEV;
if (qat_pci_dev->asym_dev == NULL)
return 0;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
/* free crypto device */
cryptodev = rte_cryptodev_pmd_get_dev(
/**< Device instance for this rte_cryptodev */
const struct rte_cryptodev_capabilities *qat_dev_capabilities;
/* QAT device asymmetric crypto capabilities */
+ const struct rte_memzone *capa_mz;
+ /* Shared memzone for storing capabilities */
uint16_t min_enq_burst_threshold;
};
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+static const struct rte_cryptodev_capabilities qat_gen3_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
#ifdef RTE_LIBRTE_SECURITY
static const struct rte_cryptodev_capabilities
qat_security_sym_capabilities[] = {
.private_data_size = sizeof(struct qat_sym_dev_private)
};
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct rte_cryptodev *cryptodev;
struct qat_sym_dev_private *internals;
+ const struct rte_cryptodev_capabilities *capabilities;
+ uint64_t capa_size;
/*
* All processes must use same driver id so they can share sessions.
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "QAT_SYM_CAPA_GEN_%d",
+ qat_pci_dev->qat_dev_gen);
+
#ifdef RTE_LIBRTE_SECURITY
security_instance = rte_malloc("qat_sec",
sizeof(struct rte_security_ctx),
internals->sym_dev_id = cryptodev->data->dev_id;
switch (qat_pci_dev->qat_dev_gen) {
case QAT_GEN1:
- internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
+ capabilities = qat_gen1_sym_capabilities;
+ capa_size = sizeof(qat_gen1_sym_capabilities);
break;
case QAT_GEN2:
+ capabilities = qat_gen2_sym_capabilities;
+ capa_size = sizeof(qat_gen2_sym_capabilities);
+ break;
case QAT_GEN3:
- internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ capabilities = qat_gen3_sym_capabilities;
+ capa_size = sizeof(qat_gen3_sym_capabilities);
break;
default:
QAT_LOG(DEBUG,
- "QAT gen %d capabilities unknown, default to GEN2",
- qat_pci_dev->qat_dev_gen);
- break;
+ "QAT gen %d capabilities unknown",
+ qat_pci_dev->qat_dev_gen);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&qat_dev_instance->sym_rte_dev, 0,
+ sizeof(qat_dev_instance->sym_rte_dev));
+ return -(EINVAL);
+ }
+
+ internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (internals->capa_mz == NULL) {
+ internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+ capa_size,
+ rte_socket_id(), 0);
}
+ if (internals->capa_mz == NULL) {
+ QAT_LOG(DEBUG,
+ "Error allocating memzone for capabilities, destroying PMD for %s",
+ name);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&qat_dev_instance->sym_rte_dev, 0,
+ sizeof(qat_dev_instance->sym_rte_dev));
+ return -EFAULT;
+ }
+
+ memcpy(internals->capa_mz->addr, capabilities, capa_size);
+ internals->qat_dev_capabilities = internals->capa_mz->addr;
while (1) {
if (qat_dev_cmd_param[i].name == NULL)
return -ENODEV;
if (qat_pci_dev->sym_dev == NULL)
return 0;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
/* free crypto device */
cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id);
/**< Device instance for this rte_cryptodev */
const struct rte_cryptodev_capabilities *qat_dev_capabilities;
/* QAT device symmetric crypto capabilities */
+ const struct rte_memzone *capa_mz;
+ /* Shared memzone for storing capabilities */
uint16_t min_enq_burst_threshold;
uint32_t internal_capabilities; /* see flags QAT_SYM_CAP_xxx */
};