* Added support for lookaside protocol offload for DOCSIS through the
``rte_security`` API.
+ * Improved multi-process handling in the QAT crypto and compression PMDs.
* **Updated the OCTEON TX2 crypto PMD.**
},
};
-
-static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES];
+/* per-process array of device data */
+struct qat_device_info qat_pci_devs[RTE_PMD_QAT_MAX_PCI_DEVICES];
static int qat_nb_pci_devices;
/*
{.device_id = 0},
};
-static struct qat_pci_device *
-qat_pci_get_dev(uint8_t dev_id)
-{
- return &qat_pci_devices[dev_id];
-}
-
static struct qat_pci_device *
qat_pci_get_named_dev(const char *name)
{
- struct qat_pci_device *dev;
unsigned int i;
if (name == NULL)
return NULL;
for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
- dev = &qat_pci_devices[i];
-
- if ((dev->attached == QAT_ATTACHED) &&
- (strcmp(dev->name, name) == 0))
- return dev;
+ if (qat_pci_devs[i].mz &&
+ (strcmp(((struct qat_pci_device *)
+ qat_pci_devs[i].mz->addr)->name, name)
+ == 0))
+ return (struct qat_pci_device *)
+ qat_pci_devs[i].mz->addr;
}
return NULL;
static uint8_t
qat_pci_find_free_device_index(void)
{
- uint8_t dev_id;
+ uint8_t dev_id;
- for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) {
- if (qat_pci_devices[dev_id].attached == QAT_DETACHED)
- break;
- }
- return dev_id;
+ for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES;
+ dev_id++) {
+ if (qat_pci_devs[dev_id].mz == NULL)
+ break;
+ }
+ return dev_id;
}
struct qat_pci_device *
struct qat_dev_cmd_param *qat_dev_cmd_param)
{
struct qat_pci_device *qat_dev;
- uint8_t qat_dev_id;
+ uint8_t qat_dev_id = 0;
char name[QAT_DEV_NAME_MAX_LEN];
struct rte_devargs *devargs = pci_dev->device.devargs;
rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ const struct rte_memzone *mz = rte_memzone_lookup(name);
+
+ if (mz == NULL) {
+ QAT_LOG(ERR,
+ "Secondary can't find %s mz, did primary create device?",
+ name);
+ return NULL;
+ }
+ qat_dev = mz->addr;
+ qat_pci_devs[qat_dev->qat_dev_id].mz = mz;
+ qat_pci_devs[qat_dev->qat_dev_id].pci_dev = pci_dev;
+ qat_nb_pci_devices++;
+ QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d",
+ qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
+ return qat_dev;
+ }
+
if (qat_pci_get_named_dev(name) != NULL) {
QAT_LOG(ERR, "QAT device with name %s already allocated!",
name);
return NULL;
}
- qat_dev = qat_pci_get_dev(qat_dev_id);
+ qat_pci_devs[qat_dev_id].mz = rte_memzone_reserve(name,
+ sizeof(struct qat_pci_device),
+ rte_socket_id(), 0);
+
+ if (qat_pci_devs[qat_dev_id].mz == NULL) {
+ QAT_LOG(ERR, "Error when allocating memzone for QAT_%d",
+ qat_dev_id);
+ return NULL;
+ }
+
+ qat_dev = qat_pci_devs[qat_dev_id].mz->addr;
memset(qat_dev, 0, sizeof(*qat_dev));
strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
qat_dev->qat_dev_id = qat_dev_id;
- qat_dev->pci_dev = pci_dev;
- switch (qat_dev->pci_dev->id.device_id) {
+ qat_pci_devs[qat_dev_id].pci_dev = pci_dev;
+ switch (pci_dev->id.device_id) {
case 0x0443:
qat_dev->qat_dev_gen = QAT_GEN1;
break;
break;
default:
QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
+ rte_memzone_free(qat_pci_devs[qat_dev->qat_dev_id].mz);
return NULL;
}
qat_dev_parse_cmd(devargs->drv_str, qat_dev_cmd_param);
rte_spinlock_init(&qat_dev->arb_csr_lock);
-
- qat_dev->attached = QAT_ATTACHED;
-
qat_nb_pci_devices++;
- QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d",
+ QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d",
qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
return qat_dev;
}
-int
+static int
qat_pci_device_release(struct rte_pci_device *pci_dev)
{
struct qat_pci_device *qat_dev;
char name[QAT_DEV_NAME_MAX_LEN];
+ int busy = 0;
if (pci_dev == NULL)
return -EINVAL;
qat_dev = qat_pci_get_named_dev(name);
if (qat_dev != NULL) {
+ struct qat_device_info *inst =
+ &qat_pci_devs[qat_dev->qat_dev_id];
/* Check that there are no service devs still on pci device */
- if (qat_dev->sym_dev != NULL)
- return -EBUSY;
- qat_dev->attached = QAT_DETACHED;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (qat_dev->sym_dev != NULL) {
+ QAT_LOG(DEBUG, "QAT sym device %s is busy",
+ name);
+ busy = 1;
+ }
+ if (qat_dev->asym_dev != NULL) {
+ QAT_LOG(DEBUG, "QAT asym device %s is busy",
+ name);
+ busy = 1;
+ }
+ if (qat_dev->comp_dev != NULL) {
+ QAT_LOG(DEBUG, "QAT comp device %s is busy",
+ name);
+ busy = 1;
+ }
+ if (busy)
+ return -EBUSY;
+ rte_memzone_free(inst->mz);
+ }
+ memset(inst, 0, sizeof(struct qat_device_info));
qat_nb_pci_devices--;
+ QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
+ name, qat_nb_pci_devices);
}
- QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
- name, qat_nb_pci_devices);
return 0;
}
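
For reference, a minimal standalone sketch (not part of the patch) of the lookup a secondary process performs in the allocate path above: the primary stores the struct qat_pci_device in a memzone named after the PCI device with a "_qat" suffix, and a secondary attaches to it by name. The helper name lookup_shared_qat_dev is hypothetical; rte_memzone_lookup() and rte_eal_process_type() are the real EAL calls used in the patch.

/*
 * Hypothetical helper, assuming qat_device.h is available for
 * struct qat_pci_device. Returns the shared device data created by the
 * primary process, or NULL if the primary has not created it yet.
 */
#include <rte_eal.h>
#include <rte_memzone.h>

#include "qat_device.h"

static struct qat_pci_device *
lookup_shared_qat_dev(const char *mz_name)
{
	const struct rte_memzone *mz;

	if (rte_eal_process_type() != RTE_PROC_SECONDARY)
		return NULL;

	/* e.g. mz_name = "<pci-address>_qat", built the same way as in
	 * qat_pci_device_allocate() above
	 */
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return NULL;

	return (struct qat_pci_device *)mz->addr;
}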
QAT_NUM_INTERM_BUFS_GEN3 = 20
};
+struct qat_device_info {
+ const struct rte_memzone *mz;
+ /**< mz to store the qat_pci_device so it can be
+ * shared across processes
+ */
+ struct rte_pci_device *pci_dev;
+ struct rte_device sym_rte_dev;
+ /**< This represents the crypto sym subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ struct rte_device asym_rte_dev;
+ /**< This represents the crypto asym subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ struct rte_device comp_rte_dev;
+ /**< This represents the compression subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a compression-specific name
+ */
+};
+
+extern struct qat_device_info qat_pci_devs[];
+
+struct qat_sym_dev_private;
+struct qat_asym_dev_private;
+struct qat_comp_dev_private;
+
/*
* This struct holds all the data about a QAT pci device
* including data about all services it supports.
* - hw_data
* - config data
* - runtime data
+ * Note: as this data can be shared in a multi-process scenario,
+ * any pointers in it must also point to shared memory.
*/
-struct qat_sym_dev_private;
-struct qat_asym_dev_private;
-struct qat_comp_dev_private;
-
struct qat_pci_device {
/* Data used by all services */
char name[QAT_DEV_NAME_MAX_LEN];
/**< Name of qat pci device */
uint8_t qat_dev_id;
- /**< Device instance for this qat pci device */
- struct rte_pci_device *pci_dev;
- /**< PCI information. */
+ /**< Id of device instance for this qat pci device */
enum qat_device_gen qat_dev_gen;
/**< QAT device generation */
rte_spinlock_t arb_csr_lock;
/**< lock to protect accesses to the arbiter CSR */
- __extension__
- uint8_t attached : 1;
- /**< Flag indicating the device is attached */
struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE];
/**< links to qps set up for each service, index same as on API */
/* Data relating to symmetric crypto service */
struct qat_sym_dev_private *sym_dev;
/**< link back to cryptodev private data */
- struct rte_device sym_rte_dev;
- /**< This represents the crypto sym subset of this pci device.
- * Register with this rather than with the one in
- * pci_dev so that its driver can have a crypto-specific name
- */
/* Data relating to asymmetric crypto service */
struct qat_asym_dev_private *asym_dev;
/**< link back to cryptodev private data */
- struct rte_device asym_rte_dev;
- /**< This represents the crypto asym subset of this pci device.
- * Register with this rather than with the one in
- * pci_dev so that its driver can have a crypto-specific name
- */
/* Data relating to compression service */
struct qat_comp_dev_private *comp_dev;
/**< link back to compressdev private data */
- struct rte_device comp_rte_dev;
- /**< This represents the compression subset of this pci device.
- * Register with this rather than with the one in
- * pci_dev so that its driver can have a compression-specific name
- */
-
- /* Data relating to asymmetric crypto service */
-
};
struct qat_gen_hw_data {
qat_pci_device_allocate(struct rte_pci_device *pci_dev,
struct qat_dev_cmd_param *qat_dev_cmd_param);
-int
-qat_pci_device_release(struct rte_pci_device *pci_dev);
-
struct qat_pci_device *
qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev);
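
A hypothetical convenience accessor (not in the patch) to illustrate the split the header comments describe: the shared struct qat_pci_device carries only qat_dev_id, and each process resolves its own rte_pci_device handle through its local qat_pci_devs[] array, which is exactly the pattern the qat_qp.c hunks below use inline.

/* Sketch only; assumes qat_device.h from this patch. */
#include "qat_device.h"

static inline struct rte_pci_device *
qat_dev_pci_dev(const struct qat_pci_device *qat_dev)
{
	/* Per-process PCI handle looked up from the process-local array. */
	return qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
}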
{
struct qat_qp *qp;
- struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ struct rte_pci_device *pci_dev =
+ qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
char op_cookie_pool_name[RTE_RING_NAMESIZE];
uint32_t i;
qp->nb_descriptors,
qat_qp_conf->cookie_size, 64, 0,
NULL, NULL, NULL, NULL,
- qat_dev->pci_dev->device.numa_node,
+ pci_dev->device.numa_node,
0);
if (!qp->op_cookie_pool) {
QAT_LOG(ERR, "QAT PMD Cannot create"
uint64_t queue_base;
void *io_addr;
const struct rte_memzone *qp_mz;
- struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ struct rte_pci_device *pci_dev =
+ qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
int ret = 0;
uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
qp_conf->service_str, "qp_mem",
queue->hw_bundle_number, queue->hw_queue_number);
qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
- qat_dev->pci_dev->device.numa_node);
+ pci_dev->device.numa_node);
if (qp_mz == NULL) {
QAT_LOG(ERR, "Failed to allocate ring memzone");
return -ENOMEM;
while (nb_ops_sent != nb_ops_possible) {
- ret = tmp_qp->build_request(*ops, base_addr + tail,
+ if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
+#ifdef BUILD_QAT_SYM
+ ret = qat_sym_build_request(*ops, base_addr + tail,
tmp_qp->op_cookies[tail >> queue->trailz],
tmp_qp->qat_dev_gen);
+#endif
+ } else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
+ ret = qat_comp_build_request(*ops, base_addr + tail,
+ tmp_qp->op_cookies[tail >> queue->trailz],
+ tmp_qp->qat_dev_gen);
+ } else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
+#ifdef BUILD_QAT_ASYM
+ ret = qat_asym_build_request(*ops, base_addr + tail,
+ tmp_qp->op_cookies[tail >> queue->trailz],
+ tmp_qp->qat_dev_gen);
+#endif
+ }
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
/* This message cannot be enqueued */
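
A side note on the enqueue hunk above: the queue pair now branches on its service type instead of calling through a build_request pointer stored in qp state, in line with the usual DPDK multi-process constraint that a function pointer saved by one process is not a valid address in another. A toy, self-contained sketch of that pattern follows; all names are hypothetical and none of this is QAT code.

#include <stdio.h>

enum toy_service { TOY_SYM, TOY_COMP };

static int toy_build_sym(int op)  { return op + 1; }
static int toy_build_comp(int op) { return op + 2; }

/* Branch on an enum that is safe to keep in shared memory, rather than
 * calling through a per-process function pointer.
 */
static int
toy_build_request(enum toy_service svc, int op)
{
	switch (svc) {
	case TOY_SYM:
		return toy_build_sym(op);
	case TOY_COMP:
		return toy_build_comp(op);
	}
	return -1;
}

int
main(void)
{
	printf("%d\n", toy_build_request(TOY_SYM, 41)); /* prints 42 */
	return 0;
}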
struct qat_dev_cmd_param *qat_dev_cmd_param)
{
int i = 0;
+ struct qat_device_info *qat_dev_instance =
+ &qat_pci_devs[qat_pci_dev->qat_dev_id];
if (qat_pci_dev->qat_dev_gen == QAT_GEN3) {
QAT_LOG(ERR, "Compression PMD not supported on QAT P5xxx");
return 0;
struct rte_compressdev_pmd_init_params init_params = {
.name = "",
- .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .socket_id = qat_dev_instance->pci_dev->device.numa_node,
};
char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
struct rte_compressdev *compressdev;
QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
/* Populate subset device to use in compressdev device creation */
- qat_pci_dev->comp_rte_dev.driver = &compdev_qat_driver;
- qat_pci_dev->comp_rte_dev.numa_node =
- qat_pci_dev->pci_dev->device.numa_node;
- qat_pci_dev->comp_rte_dev.devargs = NULL;
+ qat_dev_instance->comp_rte_dev.driver = &compdev_qat_driver;
+ qat_dev_instance->comp_rte_dev.numa_node =
+ qat_dev_instance->pci_dev->device.numa_node;
+ qat_dev_instance->comp_rte_dev.devargs = NULL;
compressdev = rte_compressdev_pmd_create(name,
- &(qat_pci_dev->comp_rte_dev),
+ &(qat_dev_instance->comp_rte_dev),
sizeof(struct qat_comp_dev_private),
&init_params);
compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
comp_dev = compressdev->data->dev_private;
comp_dev->qat_dev = qat_pci_dev;
comp_dev->compressdev = compressdev;
- qat_pci_dev->comp_dev = comp_dev;
switch (qat_pci_dev->qat_dev_gen) {
case QAT_GEN1:
comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
break;
default:
- comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
QAT_LOG(DEBUG,
"QAT gen %d capabilities unknown, default to GEN1",
qat_pci_dev->qat_dev_gen);
qat_dev_cmd_param[i].val;
i++;
}
+ qat_pci_dev->comp_dev = comp_dev;
QAT_LOG(DEBUG,
"Created QAT COMP device %s as compressdev instance %d",
struct qat_dev_cmd_param *qat_dev_cmd_param)
{
int i = 0;
+ struct qat_device_info *qat_dev_instance =
+ &qat_pci_devs[qat_pci_dev->qat_dev_id];
struct rte_cryptodev_pmd_init_params init_params = {
.name = "",
- .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .socket_id =
+ qat_dev_instance->pci_dev->device.numa_node,
.private_data_size = sizeof(struct qat_asym_dev_private)
};
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
/* Populate subset device to use in cryptodev device creation */
- qat_pci_dev->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
- qat_pci_dev->asym_rte_dev.numa_node =
- qat_pci_dev->pci_dev->device.numa_node;
- qat_pci_dev->asym_rte_dev.devargs = NULL;
+ qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+ qat_dev_instance->asym_rte_dev.numa_node =
+ qat_dev_instance->pci_dev->device.numa_node;
+ qat_dev_instance->asym_rte_dev.devargs = NULL;
cryptodev = rte_cryptodev_pmd_create(name,
- &(qat_pci_dev->asym_rte_dev), &init_params);
+ &(qat_dev_instance->asym_rte_dev), &init_params);
if (cryptodev == NULL)
return -ENODEV;
- qat_pci_dev->asym_rte_dev.name = cryptodev->data->name;
+ qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
cryptodev->driver_id = cryptodev_qat_asym_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
RTE_CRYPTODEV_FF_ASYM_SESSIONLESS |
RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP |
RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
- qat_pci_dev->asym_dev = internals;
-
internals->asym_dev_id = cryptodev->data->dev_id;
internals->qat_dev_capabilities = qat_gen1_asym_capabilities;
i++;
}
+ qat_pci_dev->asym_dev = internals;
QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
cryptodev->data->name, internals->asym_dev_id);
return 0;
cryptodev = rte_cryptodev_pmd_get_dev(
qat_pci_dev->asym_dev->asym_dev_id);
rte_cryptodev_pmd_destroy(cryptodev);
- qat_pci_dev->asym_rte_dev.name = NULL;
+ qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
qat_pci_dev->asym_dev = NULL;
return 0;
struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
{
int i = 0;
+ struct qat_device_info *qat_dev_instance =
+ &qat_pci_devs[qat_pci_dev->qat_dev_id];
+
struct rte_cryptodev_pmd_init_params init_params = {
.name = "",
- .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .socket_id =
+ qat_dev_instance->pci_dev->device.numa_node,
.private_data_size = sizeof(struct qat_sym_dev_private)
};
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct rte_cryptodev *cryptodev;
struct qat_sym_dev_private *internals;
+
#ifdef RTE_LIBRTE_SECURITY
struct rte_security_ctx *security_instance;
#endif
QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
/* Populate subset device to use in cryptodev device creation */
- qat_pci_dev->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
- qat_pci_dev->sym_rte_dev.numa_node =
- qat_pci_dev->pci_dev->device.numa_node;
- qat_pci_dev->sym_rte_dev.devargs = NULL;
+ qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_dev_instance->sym_rte_dev.numa_node =
+ qat_dev_instance->pci_dev->device.numa_node;
+ qat_dev_instance->sym_rte_dev.devargs = NULL;
cryptodev = rte_cryptodev_pmd_create(name,
- &(qat_pci_dev->sym_rte_dev), &init_params);
+ &(qat_dev_instance->sym_rte_dev), &init_params);
if (cryptodev == NULL)
return -ENODEV;
- qat_pci_dev->sym_rte_dev.name = cryptodev->data->name;
+ qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
cryptodev->driver_id = cryptodev_qat_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
RTE_CRYPTODEV_FF_SECURITY;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
#ifdef RTE_LIBRTE_SECURITY
security_instance = rte_malloc("qat_sec",
sizeof(struct rte_security_ctx),
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
- qat_pci_dev->sym_dev = internals;
internals->sym_dev_id = cryptodev->data->dev_id;
switch (qat_pci_dev->qat_dev_gen) {
internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
break;
default:
- internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
QAT_LOG(DEBUG,
"QAT gen %d capabilities unknown, default to GEN2",
qat_pci_dev->qat_dev_gen);
i++;
}
+ qat_pci_dev->sym_dev = internals;
QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
cryptodev->data->name, internals->sym_dev_id);
+
return 0;
}
rte_free(cryptodev->security_ctx);
#endif
rte_cryptodev_pmd_destroy(cryptodev);
- qat_pci_dev->sym_rte_dev.name = NULL;
+ qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
qat_pci_dev->sym_dev = NULL;
return 0;