X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fqat%2Fqat_qp.c;h=7fea10c76a81d64a3a88fd58986810748bfa74f0;hb=242b46b814725df6215c0df5f751e9d814f12af9;hp=a358ccd78b095ac5508573e32068d32b87776a68;hpb=3b84878c4bd44aaae79ea049fb1ee99b68f67c39;p=dpdk.git

diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index a358ccd78b..7fea10c76a 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
  */
 
 #include
@@ -36,6 +7,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 
@@ -81,8 +54,6 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
 		int socket_id)
 {
 	const struct rte_memzone *mz;
-	unsigned memzone_flags = 0;
-	const struct rte_memseg *ms;
 
 	PMD_INIT_FUNC_TRACE();
 	mz = rte_memzone_lookup(queue_name);
@@ -105,35 +76,13 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
 	PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
 					queue_name, queue_size, socket_id);
 
-	ms = rte_eal_get_physmem_layout();
-	switch (ms[0].hugepage_sz) {
-	case(RTE_PGSIZE_2M):
-		memzone_flags = RTE_MEMZONE_2MB;
-		break;
-	case(RTE_PGSIZE_1G):
-		memzone_flags = RTE_MEMZONE_1GB;
-		break;
-	case(RTE_PGSIZE_16M):
-		memzone_flags = RTE_MEMZONE_16MB;
-		break;
-	case(RTE_PGSIZE_16G):
-		memzone_flags = RTE_MEMZONE_16GB;
-		break;
-	default:
-		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
-}
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(queue_name, queue_size,
-		socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
-		memzone_flags, queue_size);
-#endif
+	return rte_memzone_reserve_aligned(queue_name, queue_size,
+		socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
 }
 
 int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 	const struct rte_cryptodev_qp_conf *qp_conf,
-	int socket_id)
+	int socket_id, struct rte_mempool *session_pool __rte_unused)
 {
 	struct qat_qp *qp;
 	struct rte_pci_device *pci_dev;
@@ -183,9 +132,14 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
 			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
 			RTE_CACHE_LINE_SIZE);
+	if (qp->op_cookies == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
+		rte_free(qp);
+		return -ENOMEM;
+	}
 
 	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
-	rte_atomic16_init(&qp->inflights16);
+	qp->inflights16 = 0;
 
 	if (qat_tx_queue_create(dev, &(qp->tx_q), queue_pair_id,
 					qp_conf->nb_descriptors, socket_id) != 0) {
@@ -205,7 +159,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 	adf_configure_queues(qp);
 	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
 	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
-		dev->driver->pci_drv.driver.name, dev->data->dev_id,
+		pci_dev->driver->driver.name, dev->data->dev_id,
 		queue_pair_id);
 
 	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
@@ -224,28 +178,34 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 	for (i = 0; i < qp->nb_descriptors; i++) {
 		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
 			PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
-			return -EFAULT;
+			goto create_err;
 		}
 
 		struct qat_crypto_op_cookie *sql_cookie =
				qp->op_cookies[i];
 
 		sql_cookie->qat_sgl_src_phys_addr =
-				rte_mempool_virt2phy(qp->op_cookie_pool,
-				sql_cookie) +
+				rte_mempool_virt2iova(sql_cookie) +
 				offsetof(struct qat_crypto_op_cookie,
 				qat_sgl_list_src);
 
 		sql_cookie->qat_sgl_dst_phys_addr =
-				rte_mempool_virt2phy(qp->op_cookie_pool,
-				sql_cookie) +
+				rte_mempool_virt2iova(sql_cookie) +
 				offsetof(struct qat_crypto_op_cookie,
 				qat_sgl_list_dst);
 	}
+
+	struct qat_pmd_private *internals
+		= dev->data->dev_private;
+	qp->qat_dev_gen = internals->qat_dev_gen;
+	dev->data->queue_pairs[queue_pair_id] = qp;
 	return 0;
 
 create_err:
+	if (qp->op_cookie_pool)
+		rte_mempool_free(qp->op_cookie_pool);
+	rte_free(qp->op_cookies);
 	rte_free(qp);
 	return -EFAULT;
 }
@@ -263,7 +223,7 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 	}
 
 	/* Don't free memory if there are still responses to be processed */
-	if (rte_atomic16_read(&(qp->inflights16)) == 0) {
+	if (qp->inflights16 == 0) {
 		qat_queue_delete(&(qp->tx_q));
 		qat_queue_delete(&(qp->rx_q));
 	} else {
@@ -355,11 +315,13 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
 		return -EINVAL;
 	}
 
+	pci_dev = RTE_DEV_TO_PCI(dev->device);
+
 	/*
 	 * Allocate a memzone for the queue - create a unique name.
 	 */
 	snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
-		dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
+		pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
 		queue->hw_bundle_number, queue->hw_queue_number);
 	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
 			socket_id);
@@ -369,7 +331,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
 	}
 
 	queue->base_addr = (char *)qp_mz->addr;
-	queue->base_phys_addr = qp_mz->phys_addr;
+	queue->base_phys_addr = qp_mz->iova;
 	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
 			queue_size_bytes)) {
 		PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
@@ -408,7 +370,6 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
 	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
 			queue->queue_size);
 
-	pci_dev = RTE_DEV_TO_PCI(dev->device);
 	io_addr = pci_dev->mem_resource[0].addr;
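
Note (not part of the patch): the allocation change above replaces the old hugepage-size switch and the RTE_LIBRTE_XEN_DOM0 branch with a single IOVA-contiguous memzone reservation, and the ring base address is then taken from mz->iova instead of mz->phys_addr. The following standalone C sketch shows that reservation pattern under stated assumptions: the zone name, size and use of SOCKET_ID_ANY are illustrative only, not values used by the driver.

/* Sketch only: reserve one IOVA-contiguous, size-aligned memzone and print
 * the addresses a driver would program into hardware. Name/size are made up. */
#include <stdio.h>
#include <inttypes.h>
#include <rte_eal.h>
#include <rte_memory.h>
#include <rte_memzone.h>

static const struct rte_memzone *
example_queue_zone(const char *name, uint32_t size, int socket_id)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	/* Reuse an existing zone if present (the driver additionally
	 * checks its size and socket before reusing it). */
	if (mz != NULL)
		return mz;

	/* One IOVA-contiguous block, aligned to its own size, so the ring
	 * base can be built from mz->iova (the patch drops mz->phys_addr). */
	return rte_memzone_reserve_aligned(name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, size);
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	const struct rte_memzone *mz =
			example_queue_zone("example_qp_mem", 4096, SOCKET_ID_ANY);
	if (mz == NULL)
		return -1;

	printf("virt %p iova 0x%" PRIx64 "\n", mz->addr, mz->iova);
	return 0;
}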