From: Tomasz Jozwiak
Date: Tue, 26 Mar 2019 13:51:24 +0000 (+0100)
Subject: compress/qat: add dynamic SGL allocation
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=352332744c3a;p=dpdk.git

compress/qat: add dynamic SGL allocation

This patch adds dynamic SGL allocation instead of a static one.
The number of elements in the SGL can now be adjusted in each
operation, depending on the request.

Signed-off-by: Tomasz Jozwiak
Acked-by: Fiona Trahe
---

diff --git a/config/common_base b/config/common_base
index 0a08b49ef8..a0a9fe0c76 100644
--- a/config/common_base
+++ b/config/common_base
@@ -564,7 +564,6 @@ CONFIG_RTE_LIBRTE_PMD_QAT_ASYM=n
 # Max. number of QuickAssist devices, which can be detected and attached
 #
 CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
-CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
 CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
 
 #
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
index 5631cb15af..6f583a4605 100644
--- a/doc/guides/compressdevs/qat_comp.rst
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -35,7 +35,6 @@ Limitations
 * Compressdev level 0, no compression, is not supported.
 * Queue pairs are not thread-safe (that is, within a single queue pair, RX and TX from different lcores is not supported).
 * No BSD support as BSD QAT kernel driver not available.
-* Number of segments in mbuf chains in the op must be <= RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS from the config file.
 * When using Deflate dynamic huffman encoding for compression, the input size (op.src.length)
   must be < CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE from the config file,
   see :ref:`building_qat_config` for more details.
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 4a887ca828..2c4ee62f1d 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -167,7 +167,6 @@ These are the build configuration options affecting QAT, and their default value
     CONFIG_RTE_LIBRTE_PMD_QAT=y
     CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
     CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
-    CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
     CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
 
 CONFIG_RTE_LIBRTE_PMD_QAT must be enabled for any QAT PMD to be built.
@@ -185,10 +184,6 @@ Note, there are separate config items for max cryptodevs CONFIG_RTE_CRYPTO_MAX_D
 and max compressdevs CONFIG_RTE_COMPRESS_MAX_DEVS, if necessary these should be
 adjusted to handle the total of QAT and other devices which the process will use.
 
-QAT allocates internal structures to handle SGLs. For the compression service
-CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS can be changed if more segments are needed.
-An extra (max_inflight_ops x 16) bytes per queue_pair will be used for every increment.
-
 QAT compression PMD needs intermediate buffers to support Deflate compression
 with Dynamic Huffman encoding.
 CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE specifies the size of a single buffer,
 the PMD will allocate a multiple of these,
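The whole change rests on one sizing rule: an SGL is a fixed header followed by one flat-buffer descriptor per mbuf segment, so a table can be sized to exactly the segment count a request needs. Below is a minimal stand-alone sketch of that rule; the struct layouts are simplified stand-ins, not the PMD's actual qat_sgl/qat_flat_buf definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct flat_buf {                  /* stand-in for struct qat_flat_buf */
            uint32_t len;
            uint32_t resrvd;
            uint64_t addr;
    };

    struct sgl {                       /* stand-in for struct qat_sgl */
            uint64_t resrvd;
            uint32_t num_bufs;
            uint32_t num_mapped_bufs;
            struct flat_buf buffers[]; /* flexible array, sized at run time */
    };

    int main(void)
    {
            /* 16 mirrors the default capacity each cookie starts with
             * (QAT_PMD_COMP_SGL_DEF_SEGMENTS in the qat_comp_pmd.c hunk). */
            uint16_t nb_segs = 16;
            size_t sz = sizeof(struct sgl) +
                            (size_t)nb_segs * sizeof(struct flat_buf);
            struct sgl *s = calloc(1, sz);

            if (s == NULL)
                    return EXIT_FAILURE;
            printf("SGL with %u segments: %zu bytes\n", (unsigned)nb_segs, sz);
            free(s);
            return EXIT_SUCCESS;
    }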
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index b9367d3773..2fadb021ff 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2019 Intel Corporation
  */
 
 #include <rte_mempool.h>
@@ -79,22 +79,70 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
 				QAT_COMN_PTR_TYPE_SGL);
 
+		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
+			/* we need to allocate more elements in SGL */
+			void *tmp;
+
+			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
+					sizeof(struct qat_sgl) +
+					sizeof(struct qat_flat_buf) *
+					op->m_src->nb_segs, 64,
+					cookie->socket_id);
+
+			if (unlikely(tmp == NULL)) {
+				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+					" for %d elements of SGL",
+					op->m_src->nb_segs);
+				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+				return -ENOMEM;
+			}
+			/* new SGL is valid now */
+			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
+			cookie->src_nb_elems = op->m_src->nb_segs;
+			cookie->qat_sgl_src_phys_addr =
+				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
+		}
+
 		ret = qat_sgl_fill_array(op->m_src,
 				op->src.offset,
-				&cookie->qat_sgl_src,
+				cookie->qat_sgl_src_d,
 				op->src.length,
-				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+				cookie->src_nb_elems);
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
 			return ret;
 		}
 
+		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
+			/* we need to allocate more elements in SGL */
+			struct qat_sgl *tmp;
+
+			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
+					sizeof(struct qat_sgl) +
+					sizeof(struct qat_flat_buf) *
+					op->m_dst->nb_segs, 64,
+					cookie->socket_id);
+
+			if (unlikely(tmp == NULL)) {
+				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+					" for %d elements of SGL",
+					op->m_dst->nb_segs);
+				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+				return -EINVAL;
+			}
+			/* new SGL is valid now */
+			cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
+			cookie->dst_nb_elems = op->m_dst->nb_segs;
+			cookie->qat_sgl_dst_phys_addr =
+				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
+		}
+
 		ret = qat_sgl_fill_array(op->m_dst,
 				op->dst.offset,
-				&cookie->qat_sgl_dst,
+				cookie->qat_sgl_dst_d,
 				comp_req->comp_pars.out_buffer_sz,
-				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+				cookie->dst_nb_elems);
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
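Both hunks above follow the same grow-on-demand pattern, condensed here into a single hypothetical ensure_sgl_capacity() helper with plain realloc() standing in for rte_realloc_socket(). Capacity only ever grows, the old contents need not survive the move (qat_sgl_fill_array() repopulates the table on every operation), and the cached physical address must be re-derived afterwards because a reallocation may relocate the buffer.

    #include <stdint.h>
    #include <stdlib.h>

    struct flat_buf { uint32_t len; uint32_t resrvd; uint64_t addr; };

    struct sgl {
            uint64_t resrvd;
            uint32_t num_bufs;
            uint32_t num_mapped_bufs;
            struct flat_buf buffers[];
    };

    /* Hypothetical helper condensing the src/dst hunks above. */
    static int
    ensure_sgl_capacity(struct sgl **sgl, uint16_t *capacity, uint16_t nb_segs)
    {
            void *tmp;

            if (nb_segs <= *capacity)
                    return 0;       /* common case: no reallocation */

            tmp = realloc(*sgl, sizeof(struct sgl) +
                            (size_t)nb_segs * sizeof(struct flat_buf));
            if (tmp == NULL)
                    return -1;      /* *sgl is untouched and still usable */

            *sgl = tmp;             /* publish new table and capacity together */
            *capacity = nb_segs;
            /* callers must now re-derive the table's IOVA, exactly as the
             * hunks above do with rte_malloc_virt2iova(). */
            return 0;
    }

    int main(void)
    {
            struct sgl *s = NULL;   /* realloc(NULL, n) behaves as malloc(n) */
            uint16_t cap = 0;
            int ret = ensure_sgl_capacity(&s, &cap, 16);

            free(s);
            return ret ? EXIT_FAILURE : EXIT_SUCCESS;
    }

Growing to exactly nb_segs rather than, say, doubling keeps each cookie's footprint bounded by the largest request it has ever carried.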
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 12b37b1055..1312ee93b4 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
 #ifndef _QAT_COMP_H_
@@ -40,16 +40,16 @@ struct qat_inter_sgl {
 	struct qat_flat_buf buffers[QAT_NUM_BUFS_IN_IM_SGL];
 } __rte_packed __rte_cache_aligned;
 
-struct qat_comp_sgl {
-	qat_sgl_hdr;
-	struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
-} __rte_packed __rte_cache_aligned;
-
 struct qat_comp_op_cookie {
-	struct qat_comp_sgl qat_sgl_src;
-	struct qat_comp_sgl qat_sgl_dst;
 	phys_addr_t qat_sgl_src_phys_addr;
 	phys_addr_t qat_sgl_dst_phys_addr;
+	/* dynamically created SGLs */
+	uint8_t socket_id;
+	uint16_t src_nb_elems;
+	uint16_t dst_nb_elems;
+	struct qat_sgl *qat_sgl_src_d;
+	struct qat_sgl *qat_sgl_dst_d;
 };
 
 struct qat_comp_xform {
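With the SGLs no longer embedded in the cookie, the old rte_mempool_virt2iova(cookie) + offsetof(...) arithmetic can no longer yield their IOVAs; the pointers now refer to separate rte_malloc regions. A sketch of the bookkeeping the new fields imply, as a hypothetical helper (cache_sgl_iovas() is not a PMD function), assuming the qat_comp.h definitions above:

    #include <rte_malloc.h>

    #include "qat_comp.h"    /* struct qat_comp_op_cookie */

    /* Must be called whenever qat_sgl_src_d/qat_sgl_dst_d is (re)allocated:
     * the virtual address, and therefore the IOVA, may have changed. */
    static void
    cache_sgl_iovas(struct qat_comp_op_cookie *cookie)
    {
            cookie->qat_sgl_src_phys_addr =
                            rte_malloc_virt2iova(cookie->qat_sgl_src_d);
            cookie->qat_sgl_dst_phys_addr =
                            rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
    }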
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index d45cd675b9..cf7a9e884f 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,10 +1,14 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
+#include <rte_malloc.h>
+
 #include "qat_comp.h"
 #include "qat_comp_pmd.h"
 
+#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
+
 static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 	{/* COMPRESSION - deflate */
 	 .algo = RTE_COMP_ALGO_DEFLATE,
@@ -60,6 +64,10 @@ static int
 qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
 {
 	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+	struct qat_qp **qp_addr =
+		(struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]);
+	struct qat_qp *qp = (struct qat_qp *)*qp_addr;
+	uint32_t i;
 
 	QAT_LOG(DEBUG, "Release comp qp %u on device %d",
 				queue_pair_id, dev->data->dev_id);
@@ -67,6 +75,14 @@ qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
 	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
 						= NULL;
 
+	for (i = 0; i < qp->nb_descriptors; i++) {
+
+		struct qat_comp_op_cookie *cookie = qp->op_cookies[i];
+
+		rte_free(cookie->qat_sgl_src_d);
+		rte_free(cookie->qat_sgl_dst_d);
+	}
+
 	return qat_qp_release((struct qat_qp **)
 			&(dev->data->queue_pairs[queue_pair_id]));
 }
@@ -122,15 +138,36 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 
 		struct qat_comp_op_cookie *cookie = qp->op_cookies[i];
 
+		cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
+					sizeof(struct qat_sgl) +
+					sizeof(struct qat_flat_buf) *
+					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
+					64, dev->data->socket_id);
+
+		cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL,
+					sizeof(struct qat_sgl) +
+					sizeof(struct qat_flat_buf) *
+					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
+					64, dev->data->socket_id);
+
+		if (cookie->qat_sgl_src_d == NULL ||
+				cookie->qat_sgl_dst_d == NULL) {
+			QAT_LOG(ERR, "Can't allocate SGL for device %s",
+					qat_private->qat_dev->name);
+			return -ENOMEM;
+		}
+
 		cookie->qat_sgl_src_phys_addr =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_comp_op_cookie,
-				qat_sgl_src);
+				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
 
 		cookie->qat_sgl_dst_phys_addr =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_comp_op_cookie,
-				qat_sgl_dst);
+				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
+
+		cookie->dst_nb_elems = cookie->src_nb_elems =
+				QAT_PMD_COMP_SGL_DEF_SEGMENTS;
+
+		cookie->socket_id = dev->data->socket_id;
 	}
 
 	return ret;
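The setup and release hunks pair up as a per-cookie lifecycle: each cookie receives default-capacity source and destination tables at queue-pair setup, and both are freed, at whatever size they have grown to, at teardown. A minimal stand-alone sketch of that pairing, with calloc()/free() standing in for rte_zmalloc_socket()/rte_free() and a simplified stand-in cookie:

    #include <stdint.h>
    #include <stdlib.h>

    struct flat_buf { uint32_t len; uint32_t resrvd; uint64_t addr; };

    struct sgl {
            uint64_t resrvd;
            uint32_t num_bufs;
            uint32_t num_mapped_bufs;
            struct flat_buf buffers[];
    };

    struct cookie {                  /* stand-in for qat_comp_op_cookie */
            struct sgl *src;
            struct sgl *dst;
            uint16_t src_nb_elems;
            uint16_t dst_nb_elems;
    };

    #define DEF_SEGMENTS 16          /* mirrors QAT_PMD_COMP_SGL_DEF_SEGMENTS */

    /* Setup path: give the cookie default-capacity src and dst tables. */
    static int
    cookie_init(struct cookie *c)
    {
            size_t sz = sizeof(struct sgl) +
                            DEF_SEGMENTS * sizeof(struct flat_buf);

            c->src = calloc(1, sz);
            c->dst = calloc(1, sz);
            if (c->src == NULL || c->dst == NULL)
                    return -1;
            c->src_nb_elems = c->dst_nb_elems = DEF_SEGMENTS;
            return 0;
    }

    /* Release path: free both tables, whatever size they ended up. */
    static void
    cookie_fini(struct cookie *c)
    {
            free(c->src);            /* free(NULL) is a no-op, like rte_free */
            free(c->dst);
    }

    int main(void)
    {
            struct cookie c = { 0 };
            int ret = cookie_init(&c);

            cookie_fini(&c);
            return ret ? EXIT_FAILURE : EXIT_SUCCESS;
    }

Because rte_free(NULL), like free(NULL), is a no-op, the release loop stays safe even for cookies whose setup failed part-way.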