/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2019 Intel Corporation
 */

#include <rte_malloc.h>

#include "qat_comp.h"
#include "qat_comp_pmd.h"

#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
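/* Context passed as the opaque argument to qat_comp_stream_init() when
 * populating the stream mempool; carries the owning device, the target
 * socket and an error flag set if any element fails to initialise.
 */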
struct stream_create_info {
	struct qat_comp_dev_private *comp_dev;
	int socket_id;
	int error;
};

static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
	{/* COMPRESSION - deflate */
	 .algo = RTE_COMP_ALGO_DEFLATE,
	 .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
				RTE_COMP_FF_CRC32_CHECKSUM |
				RTE_COMP_FF_ADLER32_CHECKSUM |
				RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				RTE_COMP_FF_HUFFMAN_FIXED |
				RTE_COMP_FF_HUFFMAN_DYNAMIC |
				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
				RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
				RTE_COMP_FF_STATEFUL_DECOMPRESSION,
	 .window_size = {.min = 15, .max = 15, .increment = 0} },
	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
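/* Copy the accumulated enqueue/dequeue counters for the compression
 * service into the application-visible compressdev stats structure.
 */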
static void
qat_comp_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	struct qat_common_stats qat_stats = {0};
	struct qat_comp_dev_private *qat_priv;

	if (stats == NULL || dev == NULL) {
		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
	stats->enqueued_count = qat_stats.enqueued_count;
	stats->dequeued_count = qat_stats.dequeued_count;
	stats->enqueue_err_count = qat_stats.enqueue_err_count;
	stats->dequeue_err_count = qat_stats.dequeue_err_count;
}
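/* Reset the compression-service counters kept in the underlying QAT device. */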
static void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
	struct qat_comp_dev_private *qat_priv;

	if (dev == NULL) {
		QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
}
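/* Release a compression queue pair: unlink it from the qat_pci_device,
 * free the per-descriptor SGL metadata and then the queue memory itself.
 */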
static int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	struct qat_qp **qp_addr =
		(struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]);
	struct qat_qp *qp = (struct qat_qp *)*qp_addr;
	uint32_t i;

	QAT_LOG(DEBUG, "Release comp qp %u on device %d",
				queue_pair_id, dev->data->dev_id);

	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
						= NULL;

	for (i = 0; i < qp->nb_descriptors; i++) {
		struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

		rte_free(cookie->qat_sgl_src_d);
		rte_free(cookie->qat_sgl_dst_d);
	}

	return qat_qp_release((struct qat_qp **)
			&(dev->data->queue_pairs[queue_pair_id]));
}
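/* Create a compression queue pair and, for each ring descriptor, allocate
 * an op cookie holding source and destination QAT SGLs of
 * QAT_PMD_COMP_SGL_DEF_SEGMENTS flat buffers each.
 */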
static int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct qat_qp *qp;
	int ret = 0;
	uint32_t i;
	struct qat_qp_config qat_qp_conf;

	struct qat_qp **qp_addr =
			(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
			qat_gen_config[qat_private->qat_dev->qat_dev_gen]
				      .qp_hw_data[QAT_SERVICE_COMPRESSION];
	const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;

	/* If qp is already in use free ring memory and qp metadata. */
	if (*qp_addr != NULL) {
		ret = qat_comp_qp_release(dev, qp_id);
		if (ret < 0)
			return ret;
	}
	if (qp_id >= qat_qps_per_service(comp_hw_qps,
					 QAT_SERVICE_COMPRESSION)) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	qat_qp_conf.hw = qp_hw_data;
	qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
	qat_qp_conf.nb_descriptors = max_inflight_ops;
	qat_qp_conf.socket_id = socket_id;
	qat_qp_conf.service_str = "comp";

	ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
	if (ret != 0)
		return ret;

	/* store a link to the qp in the qat_pci_device */
	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
							= *qp_addr;

	qp = (struct qat_qp *)*qp_addr;
	qp->min_enq_burst_threshold = qat_private->min_enq_burst_threshold;

	for (i = 0; i < qp->nb_descriptors; i++) {
		struct qat_comp_op_cookie *cookie =
				qp->op_cookies[i];

		cookie->qp = qp;
		cookie->cookie_index = i;

		cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
					64, dev->data->socket_id);

		cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
					64, dev->data->socket_id);

		if (cookie->qat_sgl_src_d == NULL ||
				cookie->qat_sgl_dst_d == NULL) {
			QAT_LOG(ERR, "Can't allocate SGL"
				     " for device %s",
				     qat_private->qat_dev->name);
			return -ENOMEM;
		}

		cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);

		cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);

		cookie->dst_nb_elems = cookie->src_nb_elems =
				QAT_PMD_COMP_SGL_DEF_SEGMENTS;

		cookie->socket_id = dev->data->socket_id;
	}

	return ret;
}

#define QAT_IM_BUFFER_DEBUG 0
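/* Allocate the intermediate-buffer memzones the firmware needs for dynamic
 * Huffman deflate: one memzone holding an array of IOVA pointers to the SGLs,
 * plus one memzone per SGL containing the SGL header and its flat buffers.
 */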
static const struct rte_memzone *
qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
			      uint32_t buff_size)
{
	char inter_buff_mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;
	uint8_t *mz_start = NULL;
	rte_iova_t mz_start_phys = 0;
	struct array_of_ptrs *array_of_pointers;
	int size_of_ptr_array;
	uint32_t full_size;
	uint32_t offset_of_flat_buffs;
	int i;
	int num_im_sgls = qat_gen_config[
		comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required;

	QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls",
				comp_dev->qat_dev->name, num_im_sgls);
	snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
				"%s_inter_buff", comp_dev->qat_dev->name);
	memzone = rte_memzone_lookup(inter_buff_mz_name);
	if (memzone != NULL) {
		QAT_LOG(DEBUG, "QAT COMP im buffer memzone created already");
		return memzone;
	}

	/* Create multiple memzones to hold intermediate buffers and associated
	 * meta-data needed by the firmware.
	 * The first memzone contains:
	 *  - a list of num_im_sgls physical pointers to sgls
	 * All other memzones contain:
	 *  - the sgl structure, pointing to QAT_NUM_BUFS_IN_IM_SGL flat buffers
	 *  - the flat buffers: QAT_NUM_BUFS_IN_IM_SGL buffers,
	 *    each of buff_size bytes
	 * num_im_sgls depends on the hardware generation of the device
	 * buff_size comes from the user via the config file
	 */

	size_of_ptr_array = num_im_sgls * sizeof(phys_addr_t);
	offset_of_flat_buffs = sizeof(struct qat_inter_sgl);
	full_size = offset_of_flat_buffs +
			buff_size * QAT_NUM_BUFS_IN_IM_SGL;

	memzone = rte_memzone_reserve_aligned(inter_buff_mz_name,
			size_of_ptr_array,
			comp_dev->compressdev->data->socket_id,
			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
	if (memzone == NULL) {
		QAT_LOG(ERR,
			"Can't allocate intermediate buffers for device %s",
			comp_dev->qat_dev->name);
		return NULL;
	}

	mz_start = (uint8_t *)memzone->addr;
	mz_start_phys = memzone->iova;
	QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
			", size required %d, size created %zu",
			inter_buff_mz_name, mz_start, mz_start_phys,
			size_of_ptr_array, memzone->len);

	array_of_pointers = (struct array_of_ptrs *)mz_start;
	for (i = 0; i < num_im_sgls; i++) {
		const struct rte_memzone *mz;
		struct qat_inter_sgl *sgl;
		int lb;

		snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
				"%s_inter_buff_%d", comp_dev->qat_dev->name, i);
		mz = rte_memzone_lookup(inter_buff_mz_name);
		if (mz == NULL) {
			mz = rte_memzone_reserve_aligned(inter_buff_mz_name,
					full_size,
					comp_dev->compressdev->data->socket_id,
					RTE_MEMZONE_IOVA_CONTIG,
					QAT_64_BYTE_ALIGN);
			if (mz == NULL) {
				QAT_LOG(ERR,
					"Can't allocate intermediate buffers for device %s",
					comp_dev->qat_dev->name);
				while (--i >= 0) {
					snprintf(inter_buff_mz_name,
							RTE_MEMZONE_NAMESIZE,
							"%s_inter_buff_%d",
							comp_dev->qat_dev->name,
							i);
					rte_memzone_free(
						rte_memzone_lookup(
							inter_buff_mz_name));
				}
				rte_memzone_free(memzone);
				return NULL;
			}
		}

		QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
				", size required %d, size created %zu",
				inter_buff_mz_name, mz->addr, mz->iova,
				full_size, mz->len);

		array_of_pointers->pointer[i] = mz->iova;

		sgl = (struct qat_inter_sgl *) mz->addr;
		sgl->num_bufs = QAT_NUM_BUFS_IN_IM_SGL;
		sgl->num_mapped_bufs = 0;

#if QAT_IM_BUFFER_DEBUG
		QAT_LOG(DEBUG, " : phys addr of sgl[%i] in array_of_pointers"
			" = 0x%"PRIx64, i, array_of_pointers->pointer[i]);
		QAT_LOG(DEBUG, " : virt address of sgl[%i] = %p", i, sgl);
#endif
		for (lb = 0; lb < QAT_NUM_BUFS_IN_IM_SGL; lb++) {
			sgl->buffers[lb].addr =
					mz->iova + offset_of_flat_buffs +
					lb * buff_size;
			sgl->buffers[lb].len = buff_size;
			sgl->buffers[lb].resrvd = 0;
#if QAT_IM_BUFFER_DEBUG
			QAT_LOG(DEBUG,
				" : sgl->buffers[%d].addr = 0x%"PRIx64", len=%d",
				lb, sgl->buffers[lb].addr, sgl->buffers[lb].len);
#endif
		}
	}

#if QAT_IM_BUFFER_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "IM buffer memzone start:",
			memzone->addr, size_of_ptr_array);
#endif
	return memzone;
}
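/* Create (or reuse) the mempool that backs shareable private xforms for this
 * device; it is sized by the max_nb_priv_xforms requested at configure time.
 */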
static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
			   struct rte_compressdev_config *config,
			   uint32_t num_elements)
{
	char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
			"%s_xforms", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
	mp = rte_mempool_lookup(xform_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "xformpool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "xformpool wrong size - delete it");
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->xformpool = NULL;
		}
	}

	if (mp == NULL)
		mp = rte_mempool_create(xform_pool_name,
				num_elements,
				qat_comp_xform_size(), 0, 0,
				NULL, NULL, NULL, NULL, config->socket_id,
				0);
	if (mp == NULL) {
		QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
				xform_pool_name, num_elements,
				qat_comp_xform_size());
		return NULL;
	}

	return mp;
}
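/* Mempool object constructor for stateful decompression streams: reserve an
 * IOVA-contiguous memzone holding the decompression state registers, a
 * buffer-list descriptor and the inflate context (RAM banks), then point the
 * stream at the relevant offsets within it.
 */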
static void
qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
		     void *obj, unsigned int obj_idx)
{
	struct stream_create_info *info = opaque;
	struct qat_comp_stream *stream = obj;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;
	struct qat_inter_sgl *ram_banks_desc;

	/* find a memzone for RAM banks */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
		 info->comp_dev->qat_dev->name, obj_idx);
	memzone = rte_memzone_lookup(mz_name);
	if (memzone == NULL) {
		/* allocate a memzone for compression state and RAM banks */
		memzone = rte_memzone_reserve_aligned(mz_name,
			QAT_STATE_REGISTERS_MAX_SIZE
				+ sizeof(struct qat_inter_sgl)
				+ QAT_INFLATE_CONTEXT_SIZE,
			info->socket_id,
			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
		if (memzone == NULL) {
			QAT_LOG(ERR,
			    "Can't allocate RAM banks for device %s, object %u",
				info->comp_dev->qat_dev->name, obj_idx);
			info->error = -ENOMEM;
			return;
		}
	}

	/* prepare the buffer list descriptor for RAM banks */
	ram_banks_desc = (struct qat_inter_sgl *)
		(((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
	ram_banks_desc->num_bufs = 1;
	ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
	ram_banks_desc->buffers[0].addr = memzone->iova
			+ QAT_STATE_REGISTERS_MAX_SIZE
			+ sizeof(struct qat_inter_sgl);

	memset(stream, 0, qat_comp_stream_size());
	stream->memzone = memzone;
	stream->state_registers_decomp = memzone->addr;
	stream->state_registers_decomp_phys = memzone->iova;
	stream->inflate_context = ((uint8_t *) memzone->addr)
			+ QAT_STATE_REGISTERS_MAX_SIZE;
	stream->inflate_context_phys = memzone->iova
			+ QAT_STATE_REGISTERS_MAX_SIZE;
}
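/* Mempool object destructor: free the RAM-banks memzone attached to a stream. */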
static void
qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
			void *opaque __rte_unused, void *obj,
			unsigned obj_idx __rte_unused)
{
	struct qat_comp_stream *stream = obj;

	rte_memzone_free(stream->memzone);
}
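/* Create (or reuse) the mempool of stateful decompression streams; every
 * element is initialised by qat_comp_stream_init() and the pool is torn down
 * again if any element fails to initialise.
 */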
static struct rte_mempool *
qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
			    int socket_id,
			    uint32_t num_elements)
{
	char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
		 "%s_streams", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
	mp = rte_mempool_lookup(stream_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "streampool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "streampool wrong size - delete it");
			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->streampool = NULL;
		}
	}

	if (mp == NULL) {
		struct stream_create_info info = {
			.comp_dev = comp_dev,
			.socket_id = socket_id,
			.error = 0
		};
		mp = rte_mempool_create(stream_pool_name,
				num_elements,
				qat_comp_stream_size(), 0, 0,
				NULL, NULL, qat_comp_stream_init, &info,
				socket_id, 0);
		if (mp == NULL) {
			QAT_LOG(ERR,
			     "Err creating mempool %s w %d elements of size %d",
			     stream_pool_name, num_elements,
			     qat_comp_stream_size());
		} else if (info.error) {
			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
			QAT_LOG(ERR,
			     "Destroying mempool %s as at least one element failed initialisation",
			     stream_pool_name);
			rte_mempool_free(mp);
			mp = NULL;
		}
	}

	return mp;
}
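/* Undo qat_comp_dev_config(): free the intermediate-buffer memzones, the
 * private xform pool and the stream pool.
 */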
static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
	/* Free intermediate buffers */
	if (comp_dev->interm_buff_mz) {
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int i = qat_gen_config[
		      comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required;

		while (--i >= 0) {
			snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
					"%s_inter_buff_%d",
					comp_dev->qat_dev->name, i);
			rte_memzone_free(rte_memzone_lookup(mz_name));
		}
		rte_memzone_free(comp_dev->interm_buff_mz);
		comp_dev->interm_buff_mz = NULL;
	}

	/* Free private_xform pool */
	if (comp_dev->xformpool) {
		/* Free internal mempool for private xforms */
		rte_mempool_free(comp_dev->xformpool);
		comp_dev->xformpool = NULL;
	}

	/* Free stream pool */
	if (comp_dev->streampool) {
		rte_mempool_obj_iter(comp_dev->streampool,
				     qat_comp_stream_destroy, NULL);
		rte_mempool_free(comp_dev->streampool);
		comp_dev->streampool = NULL;
	}
}
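/* Device configure callback, reached through rte_compressdev_configure().
 * Sets up the firmware intermediate buffers needed for dynamic deflate, the
 * private_xform mempool and, when streams are requested, the stream mempool.
 *
 * A minimal application-side sketch of the path that lands here (the values
 * and NB_DESC below are hypothetical, not part of this driver):
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = 0,
 *		.nb_queue_pairs = 1,
 *		.max_nb_priv_xforms = 16,
 *		.max_nb_streams = 0,
 *	};
 *	if (rte_compressdev_configure(dev_id, &cfg) < 0)
 *		return;
 *	rte_compressdev_queue_pair_setup(dev_id, 0, NB_DESC, 0);
 *	rte_compressdev_start(dev_id);
 */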
static int
qat_comp_dev_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	int ret = 0;

	if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
		QAT_LOG(WARNING,
			"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
			" QAT device can't be used for Dynamic Deflate. "
			"Did you really intend to do this?");
	} else {
		comp_dev->interm_buff_mz =
				qat_comp_setup_inter_buffers(comp_dev,
					RTE_PMD_QAT_COMP_IM_BUFFER_SIZE);
		if (comp_dev->interm_buff_mz == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	}

	if (config->max_nb_priv_xforms) {
		comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
					config, config->max_nb_priv_xforms);
		if (comp_dev->xformpool == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	} else
		comp_dev->xformpool = NULL;

	if (config->max_nb_streams) {
		comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
				config->socket_id, config->max_nb_streams);
		if (comp_dev->streampool == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	} else
		comp_dev->streampool = NULL;

	return 0;

error_out:
	_qat_comp_dev_config_clear(comp_dev);
	return ret;
}

static int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
{
	return 0;
}

static void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
{

}

static int
qat_comp_dev_close(struct rte_compressdev *dev)
{
	int i;
	int ret = 0;
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_comp_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	_qat_comp_dev_config_clear(comp_dev);

	return ret;
}

static void
qat_comp_dev_info_get(struct rte_compressdev *dev,
			struct rte_compressdev_info *info)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
		qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
			      .qp_hw_data[QAT_SERVICE_COMPRESSION];

	if (info != NULL) {
		info->max_nb_queue_pairs =
			qat_qps_per_service(comp_hw_qps,
					    QAT_SERVICE_COMPRESSION);
		info->feature_flags = dev->feature_flags;
		info->capabilities = comp_dev->qat_dev_capabilities;
	}
}

static uint16_t
qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
				    struct rte_comp_op **ops __rte_unused,
				    uint16_t nb_ops __rte_unused)
{
	QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version !");
	return 0;
}
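/* Restricted ops table installed when the firmware turns out not to support
 * compression: only teardown and stats-reset callbacks remain usable.
 */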
static struct rte_compressdev_ops compress_qat_dummy_ops = {

	/* Device related operations */
	.dev_configure		= NULL,
	.dev_start		= NULL,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= NULL,

	.stats_get		= NULL,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= NULL,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= NULL,
	.private_xform_free	= qat_comp_private_xform_free
};
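/* Dequeue handler used only for the first burst: if the firmware reports
 * ERR_CODE_QAT_COMP_WRONG_FW, the PMD is switched to the dummy enqueue/dequeue
 * handlers and the restricted ops table above; otherwise the regular dequeue
 * path is installed for all subsequent bursts.
 */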
static uint16_t
qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
				   uint16_t nb_ops)
{
	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;

	if (ret) {
		if ((*ops)->debug_status ==
				(uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
			tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;

			tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
					&compress_qat_dummy_ops;
			QAT_LOG(ERR,
				"This QAT hardware doesn't support compression operation");
		} else {
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					(compressdev_dequeue_pkt_burst_t)
					qat_dequeue_op_burst;
		}
	}
	return ret;
}

static struct rte_compressdev_ops compress_qat_ops = {

	/* Device related operations */
	.dev_configure		= qat_comp_dev_config,
	.dev_start		= qat_comp_dev_start,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= qat_comp_dev_info_get,

	.stats_get		= qat_comp_stats_get,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= qat_comp_qp_setup,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= qat_comp_private_xform_create,
	.private_xform_free	= qat_comp_private_xform_free,
	.stream_create		= qat_comp_stream_create,
	.stream_free		= qat_comp_stream_free
};

/* An rte_driver is needed in the registration of the device with compressdev.
 * The actual qat pci's rte_driver can't be used as its name represents
 * the whole pci device with all services. Think of this as a holder for a name
 * for the compression part of the pci device.
 */
static const char qat_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_QAT_PMD);
static const struct rte_driver compdev_qat_driver = {
	.name = qat_comp_drv_name,
	.alias = qat_comp_drv_name
};
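/* Create the compressdev instance for one QAT PCI device: register the ops
 * and burst functions, publish the capability table through a shared memzone
 * (primary process only) and apply any relevant devargs such as the minimum
 * enqueue burst threshold.
 */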
int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
		struct qat_dev_cmd_param *qat_dev_cmd_param)
{
	int i = 0;
	struct qat_device_info *qat_dev_instance =
			&qat_pci_devs[qat_pci_dev->qat_dev_id];
	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
	};
	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct qat_comp_dev_private *comp_dev;
	const struct rte_compressdev_capabilities *capabilities;
	uint64_t capa_size;

	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "comp");
	QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);

	/* Populate subset device to use in compressdev device creation */
	qat_dev_instance->comp_rte_dev.driver = &compdev_qat_driver;
	qat_dev_instance->comp_rte_dev.numa_node =
			qat_dev_instance->pci_dev->device.numa_node;
	qat_dev_instance->comp_rte_dev.devargs = NULL;

	compressdev = rte_compressdev_pmd_create(name,
			&(qat_dev_instance->comp_rte_dev),
			sizeof(struct qat_comp_dev_private),
			&init_params);

	if (compressdev == NULL)
		return -ENODEV;

	compressdev->dev_ops = &compress_qat_ops;

	compressdev->enqueue_burst = (compressdev_enqueue_pkt_burst_t)
			qat_enqueue_comp_op_burst;
	compressdev->dequeue_burst = qat_comp_pmd_dequeue_first_op_burst;

	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	snprintf(capa_memz_name, RTE_COMPRESSDEV_NAME_MAX_LEN,
			"QAT_COMP_CAPA_GEN_%d",
			qat_pci_dev->qat_dev_gen);

	comp_dev = compressdev->data->dev_private;
	comp_dev->qat_dev = qat_pci_dev;
	comp_dev->compressdev = compressdev;

	switch (qat_pci_dev->qat_dev_gen) {
	case QAT_GEN1:
	case QAT_GEN2:
	case QAT_GEN3:
		capabilities = qat_comp_gen_capabilities;
		capa_size = sizeof(qat_comp_gen_capabilities);
		break;
	default:
		capabilities = qat_comp_gen_capabilities;
		capa_size = sizeof(qat_comp_gen_capabilities);
		QAT_LOG(DEBUG,
			"QAT gen %d capabilities unknown, default to GEN1",
			qat_pci_dev->qat_dev_gen);
		break;
	}

	comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (comp_dev->capa_mz == NULL) {
		comp_dev->capa_mz = rte_memzone_reserve(capa_memz_name,
				capa_size, rte_socket_id(), 0);
	}
	if (comp_dev->capa_mz == NULL) {
		QAT_LOG(DEBUG,
			"Error allocating memzone for capabilities, destroying PMD for %s",
			name);
		memset(&qat_dev_instance->comp_rte_dev, 0,
			sizeof(qat_dev_instance->comp_rte_dev));
		rte_compressdev_pmd_destroy(compressdev);
		return -EFAULT;
	}

	memcpy(comp_dev->capa_mz->addr, capabilities, capa_size);
	comp_dev->qat_dev_capabilities = comp_dev->capa_mz->addr;

	while (1) {
		if (qat_dev_cmd_param[i].name == NULL)
			break;
		if (!strcmp(qat_dev_cmd_param[i].name, COMP_ENQ_THRESHOLD_NAME))
			comp_dev->min_enq_burst_threshold =
					qat_dev_cmd_param[i].val;
		i++;
	}

	qat_pci_dev->comp_dev = comp_dev;

	QAT_LOG(DEBUG,
		    "Created QAT COMP device %s as compressdev instance %d",
			name, compressdev->data->dev_id);
	return 0;
}
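/* Tear down the compressdev instance created above: free the capability
 * memzone (primary process only), release all queue pairs and pools via
 * dev_close and destroy the compressdev.
 */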
int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct qat_comp_dev_private *comp_dev;

	if (qat_pci_dev == NULL)
		return -ENODEV;

	comp_dev = qat_pci_dev->comp_dev;
	if (comp_dev == NULL)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_memzone_free(qat_pci_dev->comp_dev->capa_mz);

	/* clean up any resources used by the device */
	qat_comp_dev_close(comp_dev->compressdev);

	rte_compressdev_pmd_destroy(comp_dev->compressdev);
	qat_pci_dev->comp_dev = NULL;

	return 0;
}