1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
12 #include <rte_compressdev.h>
14 #include "test_compressdev_test_buffer.h"
/* Default DEFLATE window size exponent (2^15 = 32 KB window). */
17 #define DEFAULT_WINDOW_SIZE 15
/* zlib memLevel passed to deflateInit2 (8 is zlib's own default). */
18 #define DEFAULT_MEM_LEVEL 8
/* Give up after this many empty dequeue attempts (avoids infinite loop). */
19 #define MAX_DEQD_RETRIES 10
/* Microseconds slept between dequeue retries (10 ms). */
20 #define DEQUEUE_WAIT_TIME 10000
23 * 30% extra size for compressed data compared to original data,
24 * in case data size cannot be reduced and it is actually bigger
25 * due to the compress block headers
27 #define COMPRESS_BUF_SIZE_RATIO 1.3
/* Device is configured for a single private xform slot. */
30 #define NUM_MAX_XFORMS 1
/* Queue-pair depth used for rte_compressdev_queue_pair_setup(). */
31 #define NUM_MAX_INFLIGHT_OPS 128
/* Human-readable names for the DEFLATE Huffman variants, indexed by
 * enum rte_comp_huffman; used only in the debug log of the test driver.
 */
35 huffman_type_strings[] = {
36 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
37 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
38 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Shared suite state: the mbuf/op mempools created in testsuite_setup()
 * and the default compress/decompress xform templates the test cases
 * copy and tweak.
 */
52 struct comp_testsuite_params {
53 struct rte_mempool *mbuf_pool;
54 struct rte_mempool *op_pool;
55 struct rte_comp_xform def_comp_xform;
56 struct rte_comp_xform def_decomp_xform;
/* Single zero-initialized global instance, populated by testsuite_setup(). */
59 static struct comp_testsuite_params testsuite_params = { 0 };
/* Suite-level teardown: release both mempools created by testsuite_setup().
 * NOTE(review): presumably rte_mempool_free(NULL) is a no-op so this is safe
 * even after a failed setup — confirm against the DPDK mempool API.
 */
62 testsuite_teardown(void)
64 struct comp_testsuite_params *ts_params = &testsuite_params;
66 rte_mempool_free(ts_params->mbuf_pool);
67 rte_mempool_free(ts_params->op_pool);
/* Suite-level setup:
 *  - requires at least one compressdev to be present;
 *  - sizes the mbuf pool from the largest test buffer (incl. NUL) scaled by
 *    COMPRESS_BUF_SIZE_RATIO, so one pool serves both directions;
 *  - creates the comp-op pool with room for struct priv_op_data per op;
 *  - fills in the default compress/decompress DEFLATE xforms.
 */
73 struct comp_testsuite_params *ts_params = &testsuite_params;
76 if (rte_compressdev_count() == 0) {
77 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
/* Find the largest test string; +1 keeps the NUL terminator in the buffer. */
81 uint32_t max_buf_size = 0;
82 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
83 max_buf_size = RTE_MAX(max_buf_size,
84 strlen(compress_test_bufs[i]) + 1);
86 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
88 * Buffers to be used in compression and decompression.
89 * Since decompressed data might be larger than
90 * compressed data (due to block header),
91 * buffers should be big enough for both cases.
93 ts_params->mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
96 max_buf_size + RTE_PKTMBUF_HEADROOM,
98 if (ts_params->mbuf_pool == NULL) {
99 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
/* Per-op private area holds priv_op_data (original buffer index). */
103 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
104 0, sizeof(struct priv_op_data),
106 if (ts_params->op_pool == NULL) {
107 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
111 /* Initializes default values for compress/decompress xforms */
112 ts_params->def_comp_xform.type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing comma here acts as the comma operator joining this
 * with the next statement; harmless, but it should be a semicolon.
 */
113 ts_params->def_comp_xform.compress.algo = RTE_COMP_ALGO_DEFLATE,
114 ts_params->def_comp_xform.compress.deflate.huffman =
115 RTE_COMP_HUFFMAN_DEFAULT;
116 ts_params->def_comp_xform.compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
117 ts_params->def_comp_xform.compress.chksum = RTE_COMP_CHECKSUM_NONE;
118 ts_params->def_comp_xform.compress.window_size = DEFAULT_WINDOW_SIZE;
120 ts_params->def_decomp_xform.type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same stray comma as on the compress side — should be ';'. */
121 ts_params->def_decomp_xform.decompress.algo = RTE_COMP_ALGO_DEFLATE,
122 ts_params->def_decomp_xform.decompress.chksum = RTE_COMP_CHECKSUM_NONE;
123 ts_params->def_decomp_xform.decompress.window_size = DEFAULT_WINDOW_SIZE;
/* On any failure above, undo whatever was created. */
128 testsuite_teardown();
/* Per-test setup: configure device 0 with one queue pair, set up the
 * queue pair with NUM_MAX_INFLIGHT_OPS descriptors on the local socket,
 * and start the device. All tests hard-code device id 0.
 */
134 generic_ut_setup(void)
136 /* Configure compressdev (one device, one queue pair) */
137 struct rte_compressdev_config config = {
138 .socket_id = rte_socket_id(),
140 .max_nb_priv_xforms = NUM_MAX_XFORMS,
144 if (rte_compressdev_configure(0, &config) < 0) {
145 RTE_LOG(ERR, USER1, "Device configuration failed\n");
149 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
150 rte_socket_id()) < 0) {
151 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
155 if (rte_compressdev_start(0) < 0) {
156 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0 (mirror of generic_ut_setup). */
164 generic_ut_teardown(void)
166 rte_compressdev_stop(0);
167 if (rte_compressdev_close(0) < 0)
168 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/* Compare two buffers first by length, then byte-for-byte with memcmp.
 * Logs which check failed; used to verify decompressed output matches the
 * original test string (including its NUL terminator).
 */
172 compare_buffers(const char *buffer1, uint32_t buffer1_len,
173 const char *buffer2, uint32_t buffer2_len)
175 if (buffer1_len != buffer2_len) {
176 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
180 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
181 RTE_LOG(ERR, USER1, "Buffers are different\n");
189 * Maps compressdev and Zlib flush flags
/* Translate an rte_comp_flush_flag into the corresponding zlib flush value
 * (e.g. RTE_COMP_FLUSH_FINAL -> Z_FINISH). The enum is exhaustive, so the
 * fall-through below is defensive only.
 */
192 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
195 case RTE_COMP_FLUSH_NONE:
197 case RTE_COMP_FLUSH_SYNC:
199 case RTE_COMP_FLUSH_FULL:
201 case RTE_COMP_FLUSH_FINAL:
204 * There should be only the values above,
205 * so this should never happen
/* Reference compressor: run op's source mbuf through zlib deflate so the
 * result can be decompressed by the device under test (or vice versa).
 * Uses raw DEFLATE (negative window bits), honours the xform's Huffman
 * choice, level and window size, and fills in op->consumed/produced/status
 * exactly as a PMD would. Single-shot (stateless) operation only.
 */
213 compress_zlib(struct rte_comp_op *op,
214 const struct rte_comp_xform *xform, int mem_level)
218 int strategy, window_bits, comp_level;
221 /* initialize zlib stream */
222 stream.zalloc = Z_NULL;
223 stream.zfree = Z_NULL;
224 stream.opaque = Z_NULL;
/* RTE_COMP_HUFFMAN_FIXED maps to zlib's Z_FIXED strategy; everything
 * else uses the default strategy.
 */
226 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
229 strategy = Z_DEFAULT_STRATEGY;
232 * Window bits is the base two logarithm of the window size (in bytes).
233 * When doing raw DEFLATE, this number will be negative.
235 window_bits = -(xform->compress.window_size);
237 comp_level = xform->compress.level;
/* LEVEL_NONE bypasses the normal init and forces stored (uncompressed)
 * blocks via Z_NO_COMPRESSION.
 */
239 if (comp_level != RTE_COMP_LEVEL_NONE)
240 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
241 window_bits, mem_level, strategy);
243 ret = deflateInit(&stream, Z_NO_COMPRESSION);
246 printf("Zlib deflate could not be initialized\n");
250 /* Assuming stateless operation */
251 stream.avail_in = op->src.length;
252 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
253 stream.avail_out = op->m_dst->data_len;
254 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
256 /* Stateless operation, all buffer will be compressed in one go */
257 zlib_flush = map_zlib_flush_flag(op->flush_flag);
258 ret = deflate(&stream, zlib_flush);
260 if (stream.avail_in != 0) {
261 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* With Z_FINISH a complete single-shot deflate must end the stream. */
265 if (ret != Z_STREAM_END)
/* Report consumed/produced byte counts the same way a PMD would. */
268 op->consumed = op->src.length - stream.avail_in;
269 op->produced = op->m_dst->data_len - stream.avail_out;
270 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* NOTE(review): only deflateReset is visible here; confirm deflateEnd is
 * called on the cleanup path, otherwise the zlib state leaks.
 */
272 deflateReset(&stream);
/* Reference decompressor: mirror of compress_zlib() using zlib inflate.
 * Raw DEFLATE (negative window bits from the decompress xform), single-shot
 * stateless operation; fills op->consumed/produced/status on success.
 */
282 decompress_zlib(struct rte_comp_op *op,
283 const struct rte_comp_xform *xform)
288 int ret = TEST_FAILED;
290 /* initialize zlib stream */
291 stream.zalloc = Z_NULL;
292 stream.zfree = Z_NULL;
293 stream.opaque = Z_NULL;
296 * Window bits is the base two logarithm of the window size (in bytes).
297 * When doing raw DEFLATE, this number will be negative.
299 window_bits = -(xform->decompress.window_size);
301 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): copy-paste in the message — this is the inflate path, the
 * text should say "inflate", not "deflate".
 */
304 printf("Zlib deflate could not be initialized\n");
308 /* Assuming stateless operation */
309 stream.avail_in = op->src.length;
310 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
311 stream.avail_out = op->m_dst->data_len;
312 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
314 /* Stateless operation, all buffer will be compressed in one go */
315 zlib_flush = map_zlib_flush_flag(op->flush_flag);
316 ret = inflate(&stream, zlib_flush);
318 if (stream.avail_in != 0) {
319 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* A complete stream must end with Z_STREAM_END in single-shot mode. */
323 if (ret != Z_STREAM_END)
326 op->consumed = op->src.length - stream.avail_in;
327 op->produced = op->m_dst->data_len - stream.avail_out;
328 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* NOTE(review): as with deflate, confirm inflateEnd runs on cleanup. */
330 inflateReset(&stream);
340 * Compresses and decompresses buffer with compressdev API and Zlib API
/* Core round-trip driver used by every test case.
 * zlib_dir selects which stage runs on zlib versus the compressdev PMD
 * (ZLIB_COMPRESS / ZLIB_DECOMPRESS / ZLIB_ALL), so each direction of the
 * device can be validated against an independent implementation.
 * Only RTE_COMP_OP_STATELESS is supported. Each op's private area stores
 * the original buffer index (priv_op_data) because the PMD may dequeue
 * out of order. Returns a negative value on any failure.
 */
343 test_deflate_comp_decomp(const char * const test_bufs[],
344 unsigned int num_bufs,
346 struct rte_comp_xform *compress_xform,
347 struct rte_comp_xform *decompress_xform,
348 enum rte_comp_op_type state,
349 enum zlib_direction zlib_dir)
351 struct comp_testsuite_params *ts_params = &testsuite_params;
354 struct rte_mbuf *uncomp_bufs[num_bufs];
355 struct rte_mbuf *comp_bufs[num_bufs];
356 struct rte_comp_op *ops[num_bufs];
357 struct rte_comp_op *ops_processed[num_bufs];
358 void *priv_xforms[num_bufs];
359 uint16_t num_enqd, num_deqd, num_total_deqd;
360 uint16_t num_priv_xforms = 0;
361 unsigned int deqd_retries = 0;
362 struct priv_op_data *priv_data;
/* Capabilities decide whether one shareable private xform can serve all
 * ops or one must be created per op.
 */
365 const struct rte_compressdev_capabilities *capa =
366 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
368 /* Initialize all arrays to NULL */
369 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
370 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
371 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
372 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
373 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
375 /* Prepare the source mbufs with the data */
376 ret = rte_pktmbuf_alloc_bulk(ts_params->mbuf_pool, uncomp_bufs, num_bufs);
379 "Source mbufs could not be allocated "
380 "from the mempool\n");
/* Copy each NUL-terminated test string into its source mbuf. */
384 for (i = 0; i < num_bufs; i++) {
385 data_ptr = rte_pktmbuf_append(uncomp_bufs[i],
386 strlen(test_bufs[i]) + 1);
387 snprintf(data_ptr, strlen(test_bufs[i]) + 1, "%s",
391 /* Prepare the destination mbufs */
392 ret = rte_pktmbuf_alloc_bulk(ts_params->mbuf_pool, comp_bufs, num_bufs);
395 "Destination mbufs could not be allocated "
396 "from the mempool\n");
/* Oversize the destination by COMPRESS_BUF_SIZE_RATIO in case the data
 * is incompressible and grows.
 */
400 for (i = 0; i < num_bufs; i++)
401 rte_pktmbuf_append(comp_bufs[i],
402 strlen(test_bufs[i]) * COMPRESS_BUF_SIZE_RATIO);
404 /* Build the compression operations */
405 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
408 "Compress operations could not be allocated "
409 "from the mempool\n");
413 for (i = 0; i < num_bufs; i++) {
414 ops[i]->m_src = uncomp_bufs[i];
415 ops[i]->m_dst = comp_bufs[i];
416 ops[i]->src.offset = 0;
417 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
418 ops[i]->dst.offset = 0;
419 if (state == RTE_COMP_OP_STATELESS) {
420 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
423 "Stateful operations are not supported "
424 "in these tests yet\n");
427 ops[i]->input_chksum = 0;
429 * Store original operation index in private data,
430 * since ordering does not have to be maintained,
431 * when dequeueing from compressdev, so a comparison
432 * at the end of the test can be done.
434 priv_data = (struct priv_op_data *) (ops[i] + 1);
435 priv_data->orig_idx = i;
438 /* Compress data (either with Zlib API or compressdev API */
439 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
440 for (i = 0; i < num_bufs; i++) {
441 ret = compress_zlib(ops[i],
442 (const struct rte_comp_xform *)compress_xform,
/* zlib path is synchronous: the op is already "processed". */
447 ops_processed[i] = ops[i];
450 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
451 /* Create single compress private xform data */
452 ret = rte_compressdev_private_xform_create(0,
453 (const struct rte_comp_xform *)compress_xform,
457 "Compression private xform "
458 "could not be created\n");
462 /* Attach shareable private xform data to ops */
463 for (i = 0; i < num_bufs; i++)
464 ops[i]->private_xform = priv_xforms[0];
466 /* Create compress private xform data per op */
467 for (i = 0; i < num_bufs; i++) {
468 ret = rte_compressdev_private_xform_create(0,
469 compress_xform, &priv_xforms[i]);
472 "Compression private xform "
473 "could not be created\n");
479 /* Attach non shareable private xform data to ops */
480 for (i = 0; i < num_bufs; i++)
481 ops[i]->private_xform = priv_xforms[i];
484 /* Enqueue and dequeue all operations */
485 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
486 if (num_enqd < num_bufs) {
488 "The operations could not be enqueued\n");
495 * If retrying a dequeue call, wait for 10 ms to allow
496 * enough time to the driver to process the operations
498 if (deqd_retries != 0) {
500 * Avoid infinite loop if not all the
501 * operations get out of the device
503 if (deqd_retries == MAX_DEQD_RETRIES) {
505 "Not all operations could be "
509 usleep(DEQUEUE_WAIT_TIME);
511 num_deqd = rte_compressdev_dequeue_burst(0, 0,
512 &ops_processed[num_total_deqd], num_bufs);
513 num_total_deqd += num_deqd;
515 } while (num_total_deqd < num_enqd);
519 /* Free compress private xforms */
520 for (i = 0; i < num_priv_xforms; i++) {
521 rte_compressdev_private_xform_free(0, priv_xforms[i]);
522 priv_xforms[i] = NULL;
/* Debug-log per-buffer compression stats (ratio printed as a percent). */
527 enum rte_comp_huffman huffman_type =
528 compress_xform->compress.deflate.huffman;
529 for (i = 0; i < num_bufs; i++) {
530 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
531 RTE_LOG(DEBUG, USER1, "Buffer %u compressed from %u to %u bytes "
532 "(level = %u, huffman = %s)\n",
533 buf_idx[priv_data->orig_idx],
534 ops_processed[i]->consumed, ops_processed[i]->produced,
535 compress_xform->compress.level,
536 huffman_type_strings[huffman_type]);
537 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f",
538 (float)ops_processed[i]->produced /
539 ops_processed[i]->consumed * 100);
544 * Check operation status and free source mbufs (destination mbuf and
545 * compress operation information is needed for the decompression stage)
547 for (i = 0; i < num_bufs; i++) {
548 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
550 "Some operations were not successful\n");
553 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
554 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
555 uncomp_bufs[priv_data->orig_idx] = NULL;
/* uncomp_bufs is reused from here on to hold the decompressed output. */
558 /* Allocate buffers for decompressed data */
559 ret = rte_pktmbuf_alloc_bulk(ts_params->mbuf_pool, uncomp_bufs, num_bufs);
562 "Destination mbufs could not be allocated "
563 "from the mempool\n");
567 for (i = 0; i < num_bufs; i++) {
568 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
569 rte_pktmbuf_append(uncomp_bufs[i],
570 strlen(test_bufs[priv_data->orig_idx]) + 1);
573 /* Build the decompression operations */
574 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
577 "Decompress operations could not be allocated "
578 "from the mempool\n");
582 /* Source buffer is the compressed data from the previous operations */
583 for (i = 0; i < num_bufs; i++) {
584 ops[i]->m_src = ops_processed[i]->m_dst;
585 ops[i]->m_dst = uncomp_bufs[i];
586 ops[i]->src.offset = 0;
588 * Set the length of the compressed data to the
589 * number of bytes that were produced in the previous stage
591 ops[i]->src.length = ops_processed[i]->produced;
592 ops[i]->dst.offset = 0;
593 if (state == RTE_COMP_OP_STATELESS) {
594 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
597 "Stateful operations are not supported "
598 "in these tests yet\n");
601 ops[i]->input_chksum = 0;
603 * Copy private data from previous operations,
604 * to keep the pointer to the original buffer
606 memcpy(ops[i] + 1, ops_processed[i] + 1,
607 sizeof(struct priv_op_data));
611 * Free the previous compress operations,
612 * as it is not needed anymore
614 for (i = 0; i < num_bufs; i++) {
615 rte_comp_op_free(ops_processed[i]);
616 ops_processed[i] = NULL;
619 /* Decompress data (either with Zlib API or compressdev API */
620 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
621 for (i = 0; i < num_bufs; i++) {
622 ret = decompress_zlib(ops[i],
623 (const struct rte_comp_xform *)decompress_xform);
627 ops_processed[i] = ops[i];
630 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
631 /* Create single decompress private xform data */
632 ret = rte_compressdev_private_xform_create(0,
633 (const struct rte_comp_xform *)decompress_xform,
637 "Decompression private xform "
638 "could not be created\n");
642 /* Attach shareable private xform data to ops */
643 for (i = 0; i < num_bufs; i++)
644 ops[i]->private_xform = priv_xforms[0];
646 /* Create decompress private xform data per op */
647 for (i = 0; i < num_bufs; i++) {
648 ret = rte_compressdev_private_xform_create(0,
649 decompress_xform, &priv_xforms[i]);
/* NOTE(review): typo in the log message — "Deompression" should read
 * "Decompression" (runtime string, left untouched here).
 */
652 "Deompression private xform "
653 "could not be created\n");
659 /* Attach non shareable private xform data to ops */
660 for (i = 0; i < num_bufs; i++)
661 ops[i]->private_xform = priv_xforms[i];
664 /* Enqueue and dequeue all operations */
665 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
666 if (num_enqd < num_bufs) {
668 "The operations could not be enqueued\n");
675 * If retrying a dequeue call, wait for 10 ms to allow
676 * enough time to the driver to process the operations
678 if (deqd_retries != 0) {
680 * Avoid infinite loop if not all the
681 * operations get out of the device
683 if (deqd_retries == MAX_DEQD_RETRIES) {
685 "Not all operations could be "
689 usleep(DEQUEUE_WAIT_TIME);
691 num_deqd = rte_compressdev_dequeue_burst(0, 0,
692 &ops_processed[num_total_deqd], num_bufs);
693 num_total_deqd += num_deqd;
695 } while (num_total_deqd < num_enqd);
700 for (i = 0; i < num_bufs; i++) {
701 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
702 RTE_LOG(DEBUG, USER1, "Buffer %u decompressed from %u to %u bytes\n",
703 buf_idx[priv_data->orig_idx],
704 ops_processed[i]->consumed, ops_processed[i]->produced);
709 * Check operation status and free source mbuf (destination mbuf and
710 * compress operation information is still needed)
712 for (i = 0; i < num_bufs; i++) {
713 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
715 "Some operations were not successful\n");
718 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
719 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
720 comp_bufs[priv_data->orig_idx] = NULL;
724 * Compare the original stream with the decompressed stream
725 * (in size and the data)
727 for (i = 0; i < num_bufs; i++) {
728 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
729 const char *buf1 = test_bufs[priv_data->orig_idx];
730 const char *buf2 = rte_pktmbuf_mtod(ops_processed[i]->m_dst,
733 if (compare_buffers(buf1, strlen(buf1) + 1,
734 buf2, ops_processed[i]->produced) < 0)
/* Common cleanup path: arrays were NULL-initialized, so freeing every
 * slot is safe regardless of how far the test progressed.
 */
742 for (i = 0; i < num_bufs; i++) {
743 rte_pktmbuf_free(uncomp_bufs[i]);
744 rte_pktmbuf_free(comp_bufs[i]);
745 rte_comp_op_free(ops[i]);
746 rte_comp_op_free(ops_processed[i]);
748 for (i = 0; i < num_priv_xforms; i++) {
749 if (priv_xforms[i] != NULL)
750 rte_compressdev_private_xform_free(0, priv_xforms[i]);
/* Stateless round-trip with fixed Huffman coding: for each test buffer,
 * verify device-compress/zlib-decompress, then zlib-compress/
 * device-decompress, one buffer per call.
 */
757 test_compressdev_deflate_stateless_fixed(void)
759 struct comp_testsuite_params *ts_params = &testsuite_params;
760 const char *test_buffer;
762 struct rte_comp_xform compress_xform;
/* Start from the suite default xform, override only the Huffman type. */
764 memcpy(&compress_xform, &ts_params->def_comp_xform,
765 sizeof(struct rte_comp_xform));
766 compress_xform.compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
768 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
769 test_buffer = compress_test_bufs[i];
771 /* Compress with compressdev, decompress with Zlib */
772 if (test_deflate_comp_decomp(&test_buffer, 1,
775 &ts_params->def_decomp_xform,
776 RTE_COMP_OP_STATELESS,
777 ZLIB_DECOMPRESS) < 0)
780 /* Compress with Zlib, decompress with compressdev */
781 if (test_deflate_comp_decomp(&test_buffer, 1,
784 &ts_params->def_decomp_xform,
785 RTE_COMP_OP_STATELESS,
/* Same as the fixed-Huffman test but with dynamic Huffman coding:
 * both directions validated against zlib, one buffer per call.
 */
794 test_compressdev_deflate_stateless_dynamic(void)
796 struct comp_testsuite_params *ts_params = &testsuite_params;
797 const char *test_buffer;
799 struct rte_comp_xform compress_xform;
/* Copy the default compress xform and select dynamic Huffman coding. */
801 memcpy(&compress_xform, &ts_params->def_comp_xform,
802 sizeof(struct rte_comp_xform));
803 compress_xform.compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
805 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
806 test_buffer = compress_test_bufs[i];
808 /* Compress with compressdev, decompress with Zlib */
809 if (test_deflate_comp_decomp(&test_buffer, 1,
812 &ts_params->def_decomp_xform,
813 RTE_COMP_OP_STATELESS,
814 ZLIB_DECOMPRESS) < 0)
817 /* Compress with Zlib, decompress with compressdev */
818 if (test_deflate_comp_decomp(&test_buffer, 1,
821 &ts_params->def_decomp_xform,
822 RTE_COMP_OP_STATELESS,
/* Multi-operation test: submit every test buffer in a single burst
 * (exercises out-of-order dequeue handling), both directions vs zlib,
 * using the unmodified default xforms.
 */
831 test_compressdev_deflate_stateless_multi_op(void)
833 struct comp_testsuite_params *ts_params = &testsuite_params;
834 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
835 uint16_t buf_idx[num_bufs];
838 for (i = 0; i < num_bufs; i++)
841 /* Compress with compressdev, decompress with Zlib */
842 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
844 &ts_params->def_comp_xform,
845 &ts_params->def_decomp_xform,
846 RTE_COMP_OP_STATELESS,
847 ZLIB_DECOMPRESS) < 0)
850 /* Compress with Zlib, decompress with compressdev */
851 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
853 &ts_params->def_comp_xform,
854 &ts_params->def_decomp_xform,
855 RTE_COMP_OP_STATELESS,
/* Compression-level sweep: for each test buffer, run device-compress /
 * zlib-decompress at every level from RTE_COMP_LEVEL_MIN to
 * RTE_COMP_LEVEL_MAX (device-to-zlib direction only).
 */
864 test_compressdev_deflate_stateless_multi_level(void)
866 struct comp_testsuite_params *ts_params = &testsuite_params;
867 const char *test_buffer;
870 struct rte_comp_xform compress_xform;
872 memcpy(&compress_xform, &ts_params->def_comp_xform,
873 sizeof(struct rte_comp_xform));
875 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
876 test_buffer = compress_test_bufs[i];
877 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
879 compress_xform.compress.level = level;
880 /* Compress with compressdev, decompress with Zlib */
881 if (test_deflate_comp_decomp(&test_buffer, 1,
884 &ts_params->def_decomp_xform,
885 RTE_COMP_OP_STATELESS,
886 ZLIB_DECOMPRESS) < 0)
/* Test-suite descriptor: suite-level setup/teardown manage the mempools,
 * while each case wraps device configure/start and stop/close via
 * generic_ut_setup/generic_ut_teardown.
 */
893 static struct unit_test_suite compressdev_testsuite = {
894 .suite_name = "compressdev unit test suite",
895 .setup = testsuite_setup,
896 .teardown = testsuite_teardown,
898 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
899 test_compressdev_deflate_stateless_fixed),
900 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
901 test_compressdev_deflate_stateless_dynamic),
902 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
903 test_compressdev_deflate_stateless_multi_op),
904 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
905 test_compressdev_deflate_stateless_multi_level),
906 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point invoked by the DPDK test app; runs the whole suite and is
 * registered under the "compressdev_autotest" command below.
 */
911 test_compressdev(void)
913 return unit_test_suite_runner(&compressdev_testsuite);
916 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);