/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include "qat_comp.h"
#include "qat_comp_pmd.h"
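
/* Fetch accumulated compression-service counters from the shared QAT
 * layer and copy them into the compressdev stats structure.
 */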
static void
qat_comp_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	struct qat_common_stats qat_stats = {0};
	struct qat_comp_dev_private *qat_priv;

	if (stats == NULL || dev == NULL) {
		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
	stats->enqueued_count = qat_stats.enqueued_count;
	stats->dequeued_count = qat_stats.dequeued_count;
	stats->enqueue_err_count = qat_stats.enqueue_err_count;
	stats->dequeue_err_count = qat_stats.dequeue_err_count;
}
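
/* Zero the compression-service counters for all queue pairs on the device. */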
static void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
	struct qat_comp_dev_private *qat_priv;

	if (dev == NULL) {
		QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
}
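
/* Drop the qat_pci_device's reference to this qp, then free its ring
 * memory and metadata.
 */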
static int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;

	QAT_LOG(DEBUG, "Release comp qp %u on device %d",
			queue_pair_id, dev->data->dev_id);

	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
			= NULL;

	return qat_qp_release((struct qat_qp **)
			&(dev->data->queue_pairs[queue_pair_id]));
}
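
/* Create a queue pair on the hw ring reserved for compression, sized for
 * max_inflight_ops descriptors on the given socket.
 */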
static int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	int ret = 0;
	struct qat_qp_config qat_qp_conf;

	struct qat_qp **qp_addr =
			(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
			qat_gen_config[qat_private->qat_dev->qat_dev_gen]
					.qp_hw_data[QAT_SERVICE_COMPRESSION];
	const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;

	/* If qp is already in use free ring memory and qp metadata. */
	if (*qp_addr != NULL) {
		ret = qat_comp_qp_release(dev, qp_id);
		if (ret < 0)
			return ret;
	}
	if (qp_id >= qat_qps_per_service(comp_hw_qps,
					QAT_SERVICE_COMPRESSION)) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	qat_qp_conf.hw = qp_hw_data;
	qat_qp_conf.build_request = qat_comp_build_request;
	qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
	qat_qp_conf.nb_descriptors = max_inflight_ops;
	qat_qp_conf.socket_id = socket_id;
	qat_qp_conf.service_str = "comp";

	ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
	if (ret != 0)
		return ret;

	/* store a link to the qp in the qat_pci_device */
	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
			= *qp_addr;

	return ret;
}
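
/* Create (or reuse) the mempool holding this device's private xforms.
 * An existing pool of the wrong size is freed and recreated.
 */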
static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
		uint32_t num_elements)
{
	char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
			"%s_xforms", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
	mp = rte_mempool_lookup(xform_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "xformpool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "xformpool wrong size - delete it");
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->xformpool = NULL;
		}
	}

	if (mp == NULL)
		mp = rte_mempool_create(xform_pool_name,
				num_elements,
				qat_comp_xform_size(), 0, 0,
				NULL, NULL, NULL, NULL, rte_socket_id(),
				0);
	if (mp == NULL) {
		QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
				xform_pool_name, num_elements,
				qat_comp_xform_size());
		return NULL;
	}

	return mp;
}

static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
	/* Free the internal mempool holding private xforms */
	if (comp_dev->xformpool) {
		rte_mempool_free(comp_dev->xformpool);
		comp_dev->xformpool = NULL;
	}
}
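
/* Validate the compressdev config (stateful streams are not supported)
 * and allocate the private xform pool.
 */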
static int
qat_comp_dev_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

	if (config->max_nb_streams != 0) {
		QAT_LOG(ERR,
			"QAT device does not support STATEFUL so max_nb_streams must be 0");
		return -EINVAL;
	}

	comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
					config->max_nb_priv_xforms);
	if (comp_dev->xformpool == NULL) {
		_qat_comp_dev_config_clear(comp_dev);
		return -ENOMEM;
	}

	return 0;
}
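
/* Start/stop are no-ops: the device is usable once configured. */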
static int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
{
	return 0;
}

static void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
{

}
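
/* Release every queue pair, then free the xform pool. */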
static int
qat_comp_dev_close(struct rte_compressdev *dev)
{
	int i;
	int ret = 0;
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_comp_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	_qat_comp_dev_config_clear(comp_dev);

	return ret;
}
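
/* Report queue-pair limits, feature flags and capabilities to callers. */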
static void
qat_comp_dev_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *info)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
			qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
					.qp_hw_data[QAT_SERVICE_COMPRESSION];

	if (info != NULL) {
		info->max_nb_queue_pairs =
				qat_qps_per_service(comp_hw_qps,
						QAT_SERVICE_COMPRESSION);
		info->feature_flags = dev->feature_flags;
		info->capabilities = comp_dev->qat_dev_capabilities;
	}
}
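
/* Thin wrappers adapting rte_comp_op bursts to the generic QAT queue API. */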
static uint16_t
qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}

static uint16_t
qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}
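
/* Ops table handed to the compressdev framework for QAT devices. */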
struct rte_compressdev_ops compress_qat_ops = {

	/* Device related operations */
	.dev_configure		= qat_comp_dev_config,
	.dev_start		= qat_comp_dev_start,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= qat_comp_dev_info_get,

	.stats_get		= qat_comp_stats_get,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= qat_comp_qp_setup,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= qat_comp_private_xform_create,
	.private_xform_free	= qat_comp_private_xform_free
};