1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2018 Intel Corporation
6 #include "qat_comp_pmd.h"
8 static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
9 {/* COMPRESSION - deflate */
10 .algo = RTE_COMP_ALGO_DEFLATE,
11 .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
12 RTE_COMP_FF_CRC32_CHECKSUM |
13 RTE_COMP_FF_ADLER32_CHECKSUM |
14 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
15 RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
16 RTE_COMP_FF_HUFFMAN_FIXED |
17 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
18 RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
19 RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
20 .window_size = {.min = 15, .max = 15, .increment = 0} },
21 {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
24 qat_comp_stats_get(struct rte_compressdev *dev,
25 struct rte_compressdev_stats *stats)
27 struct qat_common_stats qat_stats = {0};
28 struct qat_comp_dev_private *qat_priv;
30 if (stats == NULL || dev == NULL) {
31 QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
34 qat_priv = dev->data->dev_private;
36 qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
37 stats->enqueued_count = qat_stats.enqueued_count;
38 stats->dequeued_count = qat_stats.dequeued_count;
39 stats->enqueue_err_count = qat_stats.enqueue_err_count;
40 stats->dequeue_err_count = qat_stats.dequeue_err_count;
44 qat_comp_stats_reset(struct rte_compressdev *dev)
46 struct qat_comp_dev_private *qat_priv;
49 QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
52 qat_priv = dev->data->dev_private;
54 qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
59 qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
61 struct qat_comp_dev_private *qat_private = dev->data->dev_private;
63 QAT_LOG(DEBUG, "Release comp qp %u on device %d",
64 queue_pair_id, dev->data->dev_id);
66 qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
69 return qat_qp_release((struct qat_qp **)
70 &(dev->data->queue_pairs[queue_pair_id]));
74 qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
75 uint32_t max_inflight_ops, int socket_id)
80 struct qat_qp_config qat_qp_conf;
82 struct qat_qp **qp_addr =
83 (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
84 struct qat_comp_dev_private *qat_private = dev->data->dev_private;
85 const struct qat_qp_hw_data *comp_hw_qps =
86 qat_gen_config[qat_private->qat_dev->qat_dev_gen]
87 .qp_hw_data[QAT_SERVICE_COMPRESSION];
88 const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;
90 /* If qp is already in use free ring memory and qp metadata. */
91 if (*qp_addr != NULL) {
92 ret = qat_comp_qp_release(dev, qp_id);
96 if (qp_id >= qat_qps_per_service(comp_hw_qps,
97 QAT_SERVICE_COMPRESSION)) {
98 QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
102 qat_qp_conf.hw = qp_hw_data;
103 qat_qp_conf.build_request = qat_comp_build_request;
104 qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
105 qat_qp_conf.nb_descriptors = max_inflight_ops;
106 qat_qp_conf.socket_id = socket_id;
107 qat_qp_conf.service_str = "comp";
109 ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
113 /* store a link to the qp in the qat_pci_device */
114 qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
117 qp = (struct qat_qp *)*qp_addr;
119 for (i = 0; i < qp->nb_descriptors; i++) {
121 struct qat_comp_op_cookie *cookie =
124 cookie->qat_sgl_src_phys_addr =
125 rte_mempool_virt2iova(cookie) +
126 offsetof(struct qat_comp_op_cookie,
129 cookie->qat_sgl_dst_phys_addr =
130 rte_mempool_virt2iova(cookie) +
131 offsetof(struct qat_comp_op_cookie,
138 static struct rte_mempool *
139 qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
140 uint32_t num_elements)
142 char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
143 struct rte_mempool *mp;
145 snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
146 "%s_xforms", comp_dev->qat_dev->name);
148 QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
149 mp = rte_mempool_lookup(xform_pool_name);
152 QAT_LOG(DEBUG, "xformpool already created");
153 if (mp->size != num_elements) {
154 QAT_LOG(DEBUG, "xformpool wrong size - delete it");
155 rte_mempool_free(mp);
157 comp_dev->xformpool = NULL;
162 mp = rte_mempool_create(xform_pool_name,
164 qat_comp_xform_size(), 0, 0,
165 NULL, NULL, NULL, NULL, rte_socket_id(),
168 QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
169 xform_pool_name, num_elements, qat_comp_xform_size());
177 _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
179 /* Free private_xform pool */
180 if (comp_dev->xformpool) {
181 /* Free internal mempool for private xforms */
182 rte_mempool_free(comp_dev->xformpool);
183 comp_dev->xformpool = NULL;
188 qat_comp_dev_config(struct rte_compressdev *dev,
189 struct rte_compressdev_config *config)
191 struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
194 if (config->max_nb_streams != 0) {
196 "QAT device does not support STATEFUL so max_nb_streams must be 0");
200 comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
201 config->max_nb_priv_xforms);
202 if (comp_dev->xformpool == NULL) {
210 _qat_comp_dev_config_clear(comp_dev);
215 qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
221 qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
227 qat_comp_dev_close(struct rte_compressdev *dev)
231 struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
233 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
234 ret = qat_comp_qp_release(dev, i);
239 _qat_comp_dev_config_clear(comp_dev);
246 qat_comp_dev_info_get(struct rte_compressdev *dev,
247 struct rte_compressdev_info *info)
249 struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
250 const struct qat_qp_hw_data *comp_hw_qps =
251 qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
252 .qp_hw_data[QAT_SERVICE_COMPRESSION];
255 info->max_nb_queue_pairs =
256 qat_qps_per_service(comp_hw_qps,
257 QAT_SERVICE_COMPRESSION);
258 info->feature_flags = dev->feature_flags;
259 info->capabilities = comp_dev->qat_dev_capabilities;
/** Thin adapter: forward comp-op enqueue to the common QAT burst API. */
static uint16_t
qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
			      uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}
/** Thin adapter: forward comp-op dequeue to the common QAT burst API. */
static uint16_t
qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
			      uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}
278 qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
279 struct rte_comp_op **ops __rte_unused,
280 uint16_t nb_ops __rte_unused)
282 QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version !");
286 static struct rte_compressdev_ops compress_qat_dummy_ops = {
288 /* Device related operations */
289 .dev_configure = NULL,
291 .dev_stop = qat_comp_dev_stop,
292 .dev_close = qat_comp_dev_close,
293 .dev_infos_get = NULL,
296 .stats_reset = qat_comp_stats_reset,
297 .queue_pair_setup = NULL,
298 .queue_pair_release = qat_comp_qp_release,
300 /* Compression related operations */
301 .private_xform_create = NULL,
302 .private_xform_free = qat_comp_private_xform_free
306 qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
309 uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
310 struct qat_qp *tmp_qp = (struct qat_qp *)qp;
313 if ((*ops)->debug_status ==
314 (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
315 tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
316 qat_comp_pmd_enq_deq_dummy_op_burst;
317 tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
318 qat_comp_pmd_enq_deq_dummy_op_burst;
320 tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
321 &compress_qat_dummy_ops;
322 QAT_LOG(ERR, "QAT PMD detected wrong FW version !");
325 tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
326 qat_comp_pmd_dequeue_op_burst;
332 static struct rte_compressdev_ops compress_qat_ops = {
334 /* Device related operations */
335 .dev_configure = qat_comp_dev_config,
336 .dev_start = qat_comp_dev_start,
337 .dev_stop = qat_comp_dev_stop,
338 .dev_close = qat_comp_dev_close,
339 .dev_infos_get = qat_comp_dev_info_get,
341 .stats_get = qat_comp_stats_get,
342 .stats_reset = qat_comp_stats_reset,
343 .queue_pair_setup = qat_comp_qp_setup,
344 .queue_pair_release = qat_comp_qp_release,
346 /* Compression related operations */
347 .private_xform_create = qat_comp_private_xform_create,
348 .private_xform_free = qat_comp_private_xform_free
352 qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
354 if (qat_pci_dev->qat_dev_gen == QAT_GEN1) {
355 QAT_LOG(ERR, "Compression PMD not supported on QAT dh895xcc");
359 struct rte_compressdev_pmd_init_params init_params = {
361 .socket_id = qat_pci_dev->pci_dev->device.numa_node,
363 char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
364 struct rte_compressdev *compressdev;
365 struct qat_comp_dev_private *comp_dev;
367 snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
368 qat_pci_dev->name, "comp");
369 QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
371 compressdev = rte_compressdev_pmd_create(name,
372 &qat_pci_dev->pci_dev->device,
373 sizeof(struct qat_comp_dev_private),
376 if (compressdev == NULL)
379 compressdev->dev_ops = &compress_qat_ops;
381 compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
382 compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;
384 compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
386 comp_dev = compressdev->data->dev_private;
387 comp_dev->qat_dev = qat_pci_dev;
388 comp_dev->compressdev = compressdev;
389 qat_pci_dev->comp_dev = comp_dev;
391 switch (qat_pci_dev->qat_dev_gen) {
394 comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
397 comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
399 "QAT gen %d capabilities unknown, default to GEN1",
400 qat_pci_dev->qat_dev_gen);
405 "Created QAT COMP device %s as compressdev instance %d",
406 name, compressdev->data->dev_id);
411 qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
413 struct qat_comp_dev_private *comp_dev;
415 if (qat_pci_dev == NULL)
418 comp_dev = qat_pci_dev->comp_dev;
419 if (comp_dev == NULL)
422 /* clean up any resources used by the device */
423 qat_comp_dev_close(comp_dev->compressdev);
425 rte_compressdev_pmd_destroy(comp_dev->compressdev);
426 qat_pci_dev->comp_dev = NULL;