This patch adds dynamic SGL allocation instead of a static one.
The number of elements in an SGL can now be adjusted for each
operation, depending on the request.
Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>
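For context, the allocation pattern introduced below can be summarised by this minimal, standalone sketch. The struct names (flat_buf, sgl), the helper sgl_ensure_capacity() and the use of plain malloc/realloc are illustrative stand-ins for the PMD's qat_flat_buf/qat_sgl and the rte_malloc API, not the actual implementation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for struct qat_flat_buf / struct qat_sgl. */
struct flat_buf {
	uint32_t len;
	uint64_t addr;
};

struct sgl {
	uint32_t num_bufs;
	struct flat_buf buffers[];  /* flexible array, sized per request */
};

#define DEF_SEGMENTS 16  /* same default as QAT_PMD_COMP_SGL_DEF_SEGMENTS */

/* Grow the SGL only when an op arrives with more mbuf segments than
 * the current allocation can describe; otherwise reuse it as-is. */
static struct sgl *
sgl_ensure_capacity(struct sgl *cur, uint16_t *cur_elems, uint16_t nb_segs)
{
	struct sgl *tmp;

	if (nb_segs <= *cur_elems)
		return cur;              /* fast path: no reallocation */

	tmp = realloc(cur, sizeof(struct sgl) +
			sizeof(struct flat_buf) * nb_segs);
	if (tmp == NULL)
		return NULL;             /* old SGL is still valid */

	*cur_elems = nb_segs;            /* capacity only ever grows */
	return tmp;
}

int main(void)
{
	uint16_t elems = DEF_SEGMENTS;
	struct sgl *s = calloc(1, sizeof(struct sgl) +
			sizeof(struct flat_buf) * elems);
	struct sgl *grown;

	/* A 40-segment chain used to be rejected by the static limit;
	 * with dynamic allocation it costs one realloc, then reuse. */
	grown = sgl_ensure_capacity(s, &elems, 40);
	if (grown != NULL)
		s = grown;

	printf("SGL capacity: %d segments\n", elems);
	free(s);
	return 0;
}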
# Max. number of QuickAssist devices, which can be detected and attached
#
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
-CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
#
* Compressdev level 0, no compression, is not supported.
* Queue pairs are not thread-safe (that is, within a single queue pair, RX and TX from different lcores is not supported).
* No BSD support as the BSD QAT kernel driver is not available.
-* Number of segments in mbuf chains in the op must be <= RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS from the config file.
* When using Deflate dynamic Huffman encoding for compression, the input size (op.src.length)
must be < CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE from the config file;
see :ref:`building_qat_config` for more details.
CONFIG_RTE_LIBRTE_PMD_QAT=y
CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
- CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
CONFIG_RTE_LIBRTE_PMD_QAT must be enabled for any QAT PMD to be built.
and max compressdevs CONFIG_RTE_COMPRESS_MAX_DEVS; if necessary, these should be
adjusted to handle the total of QAT and other devices which the process will use.
-QAT allocates internal structures to handle SGLs. For the compression service
-CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS can be changed if more segments are needed.
-An extra (max_inflight_ops x 16) bytes per queue_pair will be used for every increment.
-
QAT compression PMD needs intermediate buffers to support Deflate compression
with Dynamic Huffman encoding. CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE
specifies the size of a single buffer; the PMD will allocate a multiple of these,
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2019 Intel Corporation
*/
#include <rte_mempool.h>
ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_SGL);
+ if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
+ /* we need to allocate more elements in the SGL */
+ void *tmp;
+
+ tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
+ sizeof(struct qat_sgl) +
+ sizeof(struct qat_flat_buf) *
+ op->m_src->nb_segs, 64,
+ cookie->socket_id);
+
+ if (unlikely(tmp == NULL)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+ " for %d elements of SGL",
+ op->m_src->nb_segs);
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -ENOMEM;
+ }
+ /* new SGL is valid now */
+ cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
+ cookie->src_nb_elems = op->m_src->nb_segs;
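+ /* realloc may have moved the SGL, so re-derive the IOVA for the HW */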
+ cookie->qat_sgl_src_phys_addr =
+ rte_malloc_virt2iova(cookie->qat_sgl_src_d);
+ }
+
ret = qat_sgl_fill_array(op->m_src,
op->src.offset,
- &cookie->qat_sgl_src,
+ cookie->qat_sgl_src_d,
op->src.length,
- RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ cookie->src_nb_elems);
if (ret) {
QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
return ret;
}
+ if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
+ /* we need to allocate more elements in the SGL */
+ void *tmp;
+
+ tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
+ sizeof(struct qat_sgl) +
+ sizeof(struct qat_flat_buf) *
+ op->m_dst->nb_segs, 64,
+ cookie->socket_id);
+
+ if (unlikely(tmp == NULL)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+ " for %d elements of SGL",
+ op->m_dst->nb_segs);
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -ENOMEM;
+ }
+ /* new SGL is valid now */
+ cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
+ cookie->dst_nb_elems = op->m_dst->nb_segs;
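+ /* realloc may have moved the SGL, so re-derive the IOVA for the HW */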
+ cookie->qat_sgl_dst_phys_addr =
+ rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
+ }
+
ret = qat_sgl_fill_array(op->m_dst,
op->dst.offset,
- &cookie->qat_sgl_dst,
+ cookie->qat_sgl_dst_d,
comp_req->comp_pars.out_buffer_sz,
- RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ cookie->dst_nb_elems);
if (ret) {
QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
*/
#ifndef _QAT_COMP_H_
struct qat_flat_buf buffers[QAT_NUM_BUFS_IN_IM_SGL];
} __rte_packed __rte_cache_aligned;
-struct qat_comp_sgl {
- qat_sgl_hdr;
- struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
-} __rte_packed __rte_cache_aligned;
struct qat_comp_op_cookie {
- struct qat_comp_sgl qat_sgl_src;
- struct qat_comp_sgl qat_sgl_dst;
phys_addr_t qat_sgl_src_phys_addr;
phys_addr_t qat_sgl_dst_phys_addr;
+ /* dynamically created SGLs */
+ uint8_t socket_id;
+ uint16_t src_nb_elems;
+ uint16_t dst_nb_elems;
+ struct qat_sgl *qat_sgl_src_d;
+ struct qat_sgl *qat_sgl_dst_d;
};
struct qat_comp_xform {
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
*/
+#include <rte_malloc.h>
+
#include "qat_comp.h"
#include "qat_comp_pmd.h"
+#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
+
static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
{/* COMPRESSION - deflate */
.algo = RTE_COMP_ALGO_DEFLATE,
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]);
+ struct qat_qp *qp = *qp_addr;
+ uint32_t i;
QAT_LOG(DEBUG, "Release comp qp %u on device %d",
queue_pair_id, dev->data->dev_id);
qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
= NULL;
+ for (i = 0; qp != NULL && i < qp->nb_descriptors; i++) {
+ struct qat_comp_op_cookie *cookie = qp->op_cookies[i];
+
+ rte_free(cookie->qat_sgl_src_d);
+ rte_free(cookie->qat_sgl_dst_d);
+ }
+
return qat_qp_release((struct qat_qp **)
&(dev->data->queue_pairs[queue_pair_id]));
}
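Since the per-cookie SGLs now live on the rte_malloc heap rather than inside the cookie mempool element, they have to be freed here before the queue pair itself is released. rte_free() on a NULL pointer is a no-op, so the unconditional frees are safe even if one of the setup-time allocations failed.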
struct qat_comp_op_cookie *cookie =
qp->op_cookies[i];
+ cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
+ sizeof(struct qat_sgl) +
+ sizeof(struct qat_flat_buf) *
+ QAT_PMD_COMP_SGL_DEF_SEGMENTS,
+ 64, dev->data->socket_id);
+
+ cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL,
+ sizeof(struct qat_sgl) +
+ sizeof(struct qat_flat_buf) *
+ QAT_PMD_COMP_SGL_DEF_SEGMENTS,
+ 64, dev->data->socket_id);
+
+ if (cookie->qat_sgl_src_d == NULL ||
+ cookie->qat_sgl_dst_d == NULL) {
+ QAT_LOG(ERR, "Can't allocate SGL"
+ " for device %s",
+ qat_private->qat_dev->name);
+ return -ENOMEM;
+ }
+
cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_comp_op_cookie,
- qat_sgl_src);
+ rte_malloc_virt2iova(cookie->qat_sgl_src_d);
cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2iova(cookie) +
- offsetof(struct qat_comp_op_cookie,
- qat_sgl_dst);
+ rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
+
+ cookie->dst_nb_elems = cookie->src_nb_elems =
+ QAT_PMD_COMP_SGL_DEF_SEGMENTS;
+
+ cookie->socket_id = dev->data->socket_id;
}
return ret;
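The default of QAT_PMD_COMP_SGL_DEF_SEGMENTS (16) matches the old static RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS, so chains of up to 16 segments behave exactly as before. A longer chain triggers a one-time rte_realloc_socket() per cookie, and because src_nb_elems/dst_nb_elems only ever grow, the enlarged SGL is reused by subsequent operations with no further allocations.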