X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fqat%2Fqat_asym_pmd.c;h=d56294d3d972b97f1d0196e31ca24683a6936dff;hb=e0a6761022bd368e114fdea27809391ef0ad2c41;hp=78fc2d723e2ff2205e22e04bf6522ff88208b0bb;hpb=e2c5f4ea994cfcaafeb686bcf7b6884f558cb4ba;p=dpdk.git

diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index 78fc2d723e..d56294d3d9 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -2,207 +2,56 @@
  * Copyright(c) 2019 Intel Corporation
  */
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 
 #include "qat_logs.h"
 
+#include "qat_crypto.h"
 #include "qat_asym.h"
 #include "qat_asym_pmd.h"
-#include "qat_sym_capabilities.h"
-#include "qat_asym_capabilities.h"
 
-uint8_t cryptodev_qat_asym_driver_id;
+extern uint8_t qat_asym_driver_id;
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
-static const struct rte_cryptodev_capabilities qat_gen1_asym_capabilities[] = {
-	QAT_BASE_GEN1_ASYM_CAPABILITIES,
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static int qat_asym_qp_release(struct rte_cryptodev *dev,
-		uint16_t queue_pair_id);
-
-static int qat_asym_dev_config(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused struct rte_cryptodev_config *config)
-{
-	return 0;
-}
-
-static int qat_asym_dev_start(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-static void qat_asym_dev_stop(__rte_unused struct rte_cryptodev *dev)
+void
+qat_asym_init_op_cookie(void *op_cookie)
 {
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
 
-}
-
-static int qat_asym_dev_close(struct rte_cryptodev *dev)
-{
-	int i, ret;
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+			input_params_ptrs);
 
-	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
-		ret = qat_asym_qp_release(dev, i);
-		if (ret < 0)
-			return ret;
-	}
-
-	return 0;
-}
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+			output_params_ptrs);
 
-static void qat_asym_dev_info_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_info *info)
-{
-	struct qat_asym_dev_private *internals = dev->data->dev_private;
-	const struct qat_qp_hw_data *asym_hw_qps =
-		qat_gen_config[internals->qat_dev->qat_dev_gen]
-			.qp_hw_data[QAT_SERVICE_ASYMMETRIC];
-
-	if (info != NULL) {
-		info->max_nb_queue_pairs = qat_qps_per_service(asym_hw_qps,
-				QAT_SERVICE_ASYMMETRIC);
-		info->feature_flags = dev->feature_flags;
-		info->capabilities = internals->qat_dev_capabilities;
-		info->driver_id = cryptodev_qat_asym_driver_id;
-		/* No limit of number of sessions */
-		info->sym.max_nb_sessions = 0;
-	}
-}
-
-static void qat_asym_stats_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_stats *stats)
-{
-	struct qat_common_stats qat_stats = {0};
-	struct qat_asym_dev_private *qat_priv;
-
-	if (stats == NULL || dev == NULL) {
-		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
-		return;
-	}
-	qat_priv = dev->data->dev_private;
-
-	qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_ASYMMETRIC);
-	stats->enqueued_count = qat_stats.enqueued_count;
-	stats->dequeued_count = qat_stats.dequeued_count;
-	stats->enqueue_err_count = qat_stats.enqueue_err_count;
-	stats->dequeue_err_count = qat_stats.dequeue_err_count;
-}
-
-static void qat_asym_stats_reset(struct rte_cryptodev *dev)
-{
-	struct qat_asym_dev_private *qat_priv;
-
-	if (dev == NULL) {
-		QAT_LOG(ERR, "invalid asymmetric cryptodev ptr %p", dev);
-		return;
-	}
-	qat_priv = dev->data->dev_private;
-
-	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_ASYMMETRIC);
-}
-
-static int qat_asym_qp_release(struct rte_cryptodev *dev,
-		uint16_t queue_pair_id)
-{
-	struct qat_asym_dev_private *qat_private = dev->data->dev_private;
-
-	QAT_LOG(DEBUG, "Release asym qp %u on device %d",
-			queue_pair_id, dev->data->dev_id);
-
-	qat_private->qat_dev->qps_in_use[QAT_SERVICE_ASYMMETRIC][queue_pair_id]
-			= NULL;
-
-	return qat_qp_release((struct qat_qp **)
-			&(dev->data->queue_pairs[queue_pair_id]));
-}
-
-static int qat_asym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-	const struct rte_cryptodev_qp_conf *qp_conf,
-	int socket_id)
-{
-	struct qat_qp_config qat_qp_conf;
-	struct qat_qp *qp;
-	int ret = 0;
-	uint32_t i;
-
-	struct qat_qp **qp_addr =
-			(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
-	struct qat_asym_dev_private *qat_private = dev->data->dev_private;
-	const struct qat_qp_hw_data *asym_hw_qps =
-			qat_gen_config[qat_private->qat_dev->qat_dev_gen]
-			.qp_hw_data[QAT_SERVICE_ASYMMETRIC];
-	const struct qat_qp_hw_data *qp_hw_data = asym_hw_qps + qp_id;
-
-	/* If qp is already in use free ring memory and qp metadata. */
-	if (*qp_addr != NULL) {
-		ret = qat_asym_qp_release(dev, qp_id);
-		if (ret < 0)
-			return ret;
-	}
-	if (qp_id >= qat_qps_per_service(asym_hw_qps, QAT_SERVICE_ASYMMETRIC)) {
-		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
-		return -EINVAL;
-	}
-
-	qat_qp_conf.hw = qp_hw_data;
-	qat_qp_conf.build_request = qat_asym_build_request;
-	qat_qp_conf.cookie_size = sizeof(struct qat_asym_op_cookie);
-	qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
-	qat_qp_conf.socket_id = socket_id;
-	qat_qp_conf.service_str = "asym";
-
-	ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
-	if (ret != 0)
-		return ret;
-
-	/* store a link to the qp in the qat_pci_device */
-	qat_private->qat_dev->qps_in_use[QAT_SERVICE_ASYMMETRIC][qp_id]
-			= *qp_addr;
-
-	qp = (struct qat_qp *)*qp_addr;
-
-	for (i = 0; i < qp->nb_descriptors; i++) {
-		int j;
-
-		struct qat_asym_op_cookie __rte_unused *cookie =
-				qp->op_cookies[i];
-		cookie->input_addr = rte_mempool_virt2iova(cookie) +
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
 				offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-		cookie->output_addr = rte_mempool_virt2iova(cookie) +
+				input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
 				offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-		for (j = 0; j < 8; j++) {
-			cookie->input_params_ptrs[j] =
-					rte_mempool_virt2iova(cookie) +
-					offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-			cookie->output_params_ptrs[j] =
-					rte_mempool_virt2iova(cookie) +
-					offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-		}
+				output_array[j]);
 	}
-
-	return ret;
 }
 
-struct rte_cryptodev_ops crypto_qat_ops = {
+static struct rte_cryptodev_ops crypto_qat_ops = {
 
 		/* Device related operations */
-		.dev_configure		= qat_asym_dev_config,
-		.dev_start		= qat_asym_dev_start,
-		.dev_stop		= qat_asym_dev_stop,
-		.dev_close		= qat_asym_dev_close,
-		.dev_infos_get		= qat_asym_dev_info_get,
-
-		.stats_get		= qat_asym_stats_get,
-		.stats_reset		= qat_asym_stats_reset,
-		.queue_pair_setup	= qat_asym_qp_setup,
-		.queue_pair_release	= qat_asym_qp_release,
-		.queue_pair_count	= NULL,
+		.dev_configure		= qat_cryptodev_config,
+		.dev_start		= qat_cryptodev_start,
+		.dev_stop		= qat_cryptodev_stop,
+		.dev_close		= qat_cryptodev_close,
+		.dev_infos_get		= qat_cryptodev_info_get,
+
+		.stats_get		= qat_cryptodev_stats_get,
+		.stats_reset		= qat_cryptodev_stats_reset,
+		.queue_pair_setup	= qat_cryptodev_qp_setup,
+		.queue_pair_release	= qat_cryptodev_qp_release,
 
 		/* Crypto related operations */
 		.asym_session_get_size	= qat_asym_session_get_private_size,
@@ -213,13 +62,13 @@ struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
@@ -235,53 +84,122 @@ static const struct rte_driver cryptodev_qat_asym_driver = {
 };
 
 int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev)
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
 {
+	int i = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
 	struct rte_cryptodev_pmd_init_params init_params = {
-			.name = "",
-			.socket_id = qat_pci_dev->pci_dev->device.numa_node,
-			.private_data_size = sizeof(struct qat_asym_dev_private)
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
 	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
-	struct qat_asym_dev_private *internals;
+	struct qat_cryptodev_private *internals;
+	uint64_t capa_size;
 
 	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
 			qat_pci_dev->name, "asym");
 	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
 
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -EFAULT;
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
 	/* Populate subset device to use in cryptodev device creation */
-	qat_pci_dev->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_pci_dev->asym_rte_dev.numa_node =
-			qat_pci_dev->pci_dev->device.numa_node;
-	qat_pci_dev->asym_rte_dev.devargs = NULL;
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
 
 	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_pci_dev->asym_rte_dev), &init_params);
+			&(qat_dev_instance->asym_rte_dev), &init_params);
 
 	if (cryptodev == NULL)
 		return -ENODEV;
 
-	qat_pci_dev->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = cryptodev_qat_asym_driver_id;
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
 	cryptodev->dev_ops = &crypto_qat_ops;
 
 	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
 	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-	cryptodev->feature_flags = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_HW_ACCELERATED |
-			RTE_CRYPTODEV_FF_ASYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
 	internals = cryptodev->data->dev_private;
 	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
 	qat_pci_dev->asym_dev = internals;
 
-	internals->asym_dev_id = cryptodev->data->dev_id;
-	internals->qat_dev_capabilities = qat_gen1_asym_capabilities;
+	rte_cryptodev_pmd_probing_finish(cryptodev);
 
 	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->asym_dev_id);
+			cryptodev->data->name, internals->dev_id);
 	return 0;
 }
 
@@ -294,12 +212,14 @@ qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 		return -ENODEV;
 	if (qat_pci_dev->asym_dev == NULL)
 		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
 
 	/* free crypto device */
 	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->asym_dev_id);
+			qat_pci_dev->asym_dev->dev_id);
 	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_dev->asym_rte_dev.name = NULL;
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
 	qat_pci_dev->asym_dev = NULL;
 
 	return 0;
@@ -308,4 +228,4 @@ qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, cryptodev_qat_asym_driver,
-		cryptodev_qat_asym_driver_id);
+		qat_asym_driver_id);