1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
12 #include <rte_compressdev.h>
13 #include <rte_string_fns.h>
15 #include "test_compressdev_test_buffer.h"
18 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
20 #define DEFAULT_WINDOW_SIZE 15
21 #define DEFAULT_MEM_LEVEL 8
22 #define MAX_DEQD_RETRIES 10
23 #define DEQUEUE_WAIT_TIME 10000
26 * 30% extra size for compressed data compared to original data,
27 * in case data size cannot be reduced and it is actually bigger
28 * due to the compress block headers
30 #define COMPRESS_BUF_SIZE_RATIO 1.3
31 #define NUM_LARGE_MBUFS 16
32 #define SMALL_SEG_SIZE 256
35 #define NUM_MAX_XFORMS 16
36 #define NUM_MAX_INFLIGHT_OPS 128
39 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
40 #define ZLIB_HEADER_SIZE 2
41 #define ZLIB_TRAILER_SIZE 4
42 #define GZIP_HEADER_SIZE 10
43 #define GZIP_TRAILER_SIZE 8
/* Human-readable names for enum rte_comp_huffman values, indexed by the
 * enum itself (designated initializers); used only in debug log output. */
46 huffman_type_strings[] = {
47 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
48 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
49 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Resources shared by every test case in the suite; created once in the
 * suite setup and released in testsuite_teardown(). */
63 struct comp_testsuite_params {
64 struct rte_mempool *large_mbuf_pool;
65 struct rte_mempool *small_mbuf_pool;
66 struct rte_mempool *op_pool;
67 struct rte_comp_xform *def_comp_xform;
68 struct rte_comp_xform *def_decomp_xform;
/* Single global instance; zero-initialized so teardown is safe even if
 * setup failed part-way (mempool free / rte_free accept NULL). */
71 static struct comp_testsuite_params testsuite_params = { 0 };
/* Suite-level teardown: warn about leaked mbufs/ops, then release all
 * pools and the default xforms allocated by the suite setup. */
74 testsuite_teardown(void)
76 struct comp_testsuite_params *ts_params = &testsuite_params;
/* Non-zero in-use count means a test case leaked buffers; log but still
 * free the pools below. */
78 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
79 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
80 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
81 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
82 if (rte_mempool_in_use_count(ts_params->op_pool))
83 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() are NULL-tolerant, so this is safe even
 * after a partial setup. */
85 rte_mempool_free(ts_params->large_mbuf_pool);
86 rte_mempool_free(ts_params->small_mbuf_pool);
87 rte_mempool_free(ts_params->op_pool);
88 rte_free(ts_params->def_comp_xform);
89 rte_free(ts_params->def_decomp_xform);
/* Suite setup body (signature not visible in this view): requires at least
 * one compressdev, sizes mbuf pools from the largest test buffer, creates
 * the op pool and the default compress/decompress xforms. */
95 struct comp_testsuite_params *ts_params = &testsuite_params;
96 uint32_t max_buf_size = 0;
99 if (rte_compressdev_count() == 0) {
100 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
104 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
105 rte_compressdev_name_get(0));
/* Largest test string (+1 for the NUL terminator) drives buffer sizing. */
107 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
108 max_buf_size = RTE_MAX(max_buf_size,
109 strlen(compress_test_bufs[i]) + 1);
112 * Buffers to be used in compression and decompression.
113 * Since decompressed data might be larger than
114 * compressed data (due to block header),
115 * buffers should be big enough for both cases.
/* 30% headroom (COMPRESS_BUF_SIZE_RATIO) for incompressible inputs. */
117 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
118 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
121 max_buf_size + RTE_PKTMBUF_HEADROOM,
123 if (ts_params->large_mbuf_pool == NULL) {
124 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
128 /* Create mempool with smaller buffers for SGL testing */
129 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
130 NUM_LARGE_MBUFS * MAX_SEGS,
132 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
134 if (ts_params->small_mbuf_pool == NULL) {
135 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* Each op carries a struct priv_op_data in its private area, used later to
 * recover the original buffer index after out-of-order dequeue. */
139 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
140 0, sizeof(struct priv_op_data),
142 if (ts_params->op_pool == NULL) {
143 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
147 ts_params->def_comp_xform =
148 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
149 if (ts_params->def_comp_xform == NULL) {
151 "Default compress xform could not be created\n");
154 ts_params->def_decomp_xform =
155 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
156 if (ts_params->def_decomp_xform == NULL) {
158 "Default decompress xform could not be created\n");
162 /* Initializes default values for compress/decompress xforms */
163 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): the trailing ',' below is the comma operator, not a
 * statement terminator — behaves the same as ';' here, but should be
 * a semicolon for clarity. Same wart on the decompress.algo line. */
164 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
165 ts_params->def_comp_xform->compress.deflate.huffman =
166 RTE_COMP_HUFFMAN_DEFAULT;
167 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
168 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
169 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
171 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
172 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
173 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
174 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: undo any partial allocation before reporting failure. */
179 testsuite_teardown();
/* Per-test setup: configure device 0 with one queue pair, set it up and
 * start the device. Used as the unit-test framework's setup hook. */
185 generic_ut_setup(void)
187 /* Configure compressdev (one device, one queue pair) */
188 struct rte_compressdev_config config = {
189 .socket_id = rte_socket_id(),
191 .max_nb_priv_xforms = NUM_MAX_XFORMS,
195 if (rte_compressdev_configure(0, &config) < 0) {
196 RTE_LOG(ERR, USER1, "Device configuration failed\n");
200 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
201 rte_socket_id()) < 0) {
202 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
206 if (rte_compressdev_start(0) < 0) {
207 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0 (mirror of generic_ut_setup). */
215 generic_ut_teardown(void)
217 rte_compressdev_stop(0);
218 if (rte_compressdev_close(0) < 0)
219 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/* Negative test: device configuration / queue pair setup must fail for
 * invalid parameters (0 queue pairs, too many queue pairs, qp setup before
 * any queue pairs were configured). */
223 test_compressdev_invalid_configuration(void)
225 struct rte_compressdev_config invalid_config;
226 struct rte_compressdev_config valid_config = {
227 .socket_id = rte_socket_id(),
229 .max_nb_priv_xforms = NUM_MAX_XFORMS,
232 struct rte_compressdev_info dev_info;
234 /* Invalid configuration with 0 queue pairs */
235 memcpy(&invalid_config, &valid_config,
236 sizeof(struct rte_compressdev_config));
237 invalid_config.nb_queue_pairs = 0;
239 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
240 "Device configuration was successful "
241 "with no queue pairs (invalid)\n");
244 * Invalid configuration with too many queue pairs
245 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit advertised", so skip that case. */
247 rte_compressdev_info_get(0, &dev_info);
248 if (dev_info.max_nb_queue_pairs != 0) {
249 memcpy(&invalid_config, &valid_config,
250 sizeof(struct rte_compressdev_config));
251 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
253 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
254 "Device configuration was successful "
255 "with too many queue pairs (invalid)\n");
258 /* Invalid queue pair setup, with no number of queue pairs set */
259 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
260 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
261 "Queue pair setup was successful "
262 "with no queue pairs set (invalid)\n");
/* Compare two buffers by length, then content; logs and returns an error
 * indication on mismatch (return statements not visible in this view). */
268 compare_buffers(const char *buffer1, uint32_t buffer1_len,
269 const char *buffer2, uint32_t buffer2_len)
271 if (buffer1_len != buffer2_len) {
272 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
276 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
277 RTE_LOG(ERR, USER1, "Buffers are different\n");
285 * Maps compressdev and Zlib flush flags
/* Translate an rte_comp_flush_flag into the matching zlib flush constant
 * (Z_NO_FLUSH/Z_SYNC_FLUSH/Z_FULL_FLUSH/Z_FINISH — case bodies not visible
 * in this view). */
288 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
291 case RTE_COMP_FLUSH_NONE:
293 case RTE_COMP_FLUSH_SYNC:
295 case RTE_COMP_FLUSH_FULL:
297 case RTE_COMP_FLUSH_FINAL:
300 * There should be only the values above,
301 * so this should never happen
/* Reference compression path: compress op->m_src into op->m_dst with zlib's
 * deflate, honoring the xform's huffman type, window size, checksum and
 * level, then fill in the op result fields as a PMD would. SGL sources are
 * first linearized into temporary contiguous buffers. */
309 compress_zlib(struct rte_comp_op *op,
310 const struct rte_comp_xform *xform, int mem_level)
314 int strategy, window_bits, comp_level;
315 int ret = TEST_FAILED;
316 uint8_t *single_src_buf = NULL;
317 uint8_t *single_dst_buf = NULL;
319 /* initialize zlib stream */
320 stream.zalloc = Z_NULL;
321 stream.zfree = Z_NULL;
322 stream.opaque = Z_NULL;
/* FIXED huffman maps to Z_FIXED (line not visible); everything else uses
 * the default strategy. */
324 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
327 strategy = Z_DEFAULT_STRATEGY;
330 * Window bits is the base two logarithm of the window size (in bytes).
331 * When doing raw DEFLATE, this number will be negative.
333 window_bits = -(xform->compress.window_size);
/* Checksummed output needs a zlib (ADLER32) or gzip (CRC32) wrapper, which
 * zlib selects via special window_bits encodings. */
334 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
336 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
337 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
339 comp_level = xform->compress.level;
341 if (comp_level != RTE_COMP_LEVEL_NONE)
342 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
343 window_bits, mem_level, strategy);
/* LEVEL_NONE: stored (uncompressed) deflate blocks. */
345 ret = deflateInit(&stream, Z_NO_COMPRESSION);
348 printf("Zlib deflate could not be initialized\n");
352 /* Assuming stateless operation */
/* Multi-segment mbufs: zlib needs contiguous input/output, so copy the SGL
 * into flat temporary buffers first. */
354 if (op->m_src->nb_segs > 1) {
355 single_src_buf = rte_malloc(NULL,
356 rte_pktmbuf_pkt_len(op->m_src), 0);
357 if (single_src_buf == NULL) {
358 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
361 single_dst_buf = rte_malloc(NULL,
362 rte_pktmbuf_pkt_len(op->m_dst), 0);
363 if (single_dst_buf == NULL) {
364 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
367 if (rte_pktmbuf_read(op->m_src, 0,
368 rte_pktmbuf_pkt_len(op->m_src),
369 single_src_buf) == NULL) {
371 "Buffer could not be read entirely\n");
375 stream.avail_in = op->src.length;
376 stream.next_in = single_src_buf;
377 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
378 stream.next_out = single_dst_buf;
/* Contiguous case: operate directly on the mbuf data. */
381 stream.avail_in = op->src.length;
382 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
383 stream.avail_out = op->m_dst->data_len;
384 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
386 /* Stateless operation, all buffer will be compressed in one go */
387 zlib_flush = map_zlib_flush_flag(op->flush_flag);
388 ret = deflate(&stream, zlib_flush);
390 if (stream.avail_in != 0) {
391 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* With FLUSH_FINAL the single call must fully finish the stream. */
395 if (ret != Z_STREAM_END)
398 /* Copy data to destination SGL */
399 if (op->m_src->nb_segs > 1) {
400 uint32_t remaining_data = stream.total_out;
401 uint8_t *src_data = single_dst_buf;
402 struct rte_mbuf *dst_buf = op->m_dst;
/* Scatter the flat output back across the destination segments. */
404 while (remaining_data > 0) {
405 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
408 if (remaining_data < dst_buf->data_len) {
409 memcpy(dst_data, src_data, remaining_data);
412 memcpy(dst_data, src_data, dst_buf->data_len);
413 remaining_data -= dst_buf->data_len;
414 src_data += dst_buf->data_len;
415 dst_buf = dst_buf->next;
420 op->consumed = stream.total_in;
/* Strip the zlib/gzip wrapper so op->produced reflects raw deflate payload,
 * as a compressdev PMD would report it. */
421 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
422 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
423 op->produced = stream.total_out -
424 (ZLIB_HEADER_SIZE + ZLIB_TRAILER_SIZE);
425 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
426 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
427 op->produced = stream.total_out -
428 (GZIP_HEADER_SIZE + GZIP_TRAILER_SIZE);
430 op->produced = stream.total_out;
432 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* stream.adler carries either the adler32 or crc32, per window_bits mode. */
433 op->output_chksum = stream.adler;
435 deflateReset(&stream);
/* Cleanup path; rte_free(NULL) is a no-op. */
440 rte_free(single_src_buf);
441 rte_free(single_dst_buf);
/* Reference decompression path: inflate op->m_src into op->m_dst with zlib,
 * using raw-deflate window bits from the xform; mirrors compress_zlib's
 * SGL linearization and result reporting. */
447 decompress_zlib(struct rte_comp_op *op,
448 const struct rte_comp_xform *xform)
453 int ret = TEST_FAILED;
454 uint8_t *single_src_buf = NULL;
455 uint8_t *single_dst_buf = NULL;
457 /* initialize zlib stream */
458 stream.zalloc = Z_NULL;
459 stream.zfree = Z_NULL;
460 stream.opaque = Z_NULL;
463 * Window bits is the base two logarithm of the window size (in bytes).
464 * When doing raw DEFLATE, this number will be negative.
466 window_bits = -(xform->decompress.window_size);
467 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate init path —
 * text should read "inflate" (copy/paste from compress_zlib). */
470 printf("Zlib deflate could not be initialized\n");
474 /* Assuming stateless operation */
/* Multi-segment source: flatten into temporary contiguous buffers. */
476 if (op->m_src->nb_segs > 1) {
477 single_src_buf = rte_malloc(NULL,
478 rte_pktmbuf_pkt_len(op->m_src), 0);
479 if (single_src_buf == NULL) {
480 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
483 single_dst_buf = rte_malloc(NULL,
484 rte_pktmbuf_pkt_len(op->m_dst), 0);
485 if (single_dst_buf == NULL) {
486 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
489 if (rte_pktmbuf_read(op->m_src, 0,
490 rte_pktmbuf_pkt_len(op->m_src),
491 single_src_buf) == NULL) {
493 "Buffer could not be read entirely\n");
497 stream.avail_in = op->src.length;
498 stream.next_in = single_src_buf;
499 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
500 stream.next_out = single_dst_buf;
503 stream.avail_in = op->src.length;
504 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
505 stream.avail_out = op->m_dst->data_len;
506 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
509 /* Stateless operation, all buffer will be compressed in one go */
510 zlib_flush = map_zlib_flush_flag(op->flush_flag);
511 ret = inflate(&stream, zlib_flush);
513 if (stream.avail_in != 0) {
514 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
518 if (ret != Z_STREAM_END)
/* Scatter the flat inflate output back across destination segments. */
521 if (op->m_src->nb_segs > 1) {
522 uint32_t remaining_data = stream.total_out;
523 uint8_t *src_data = single_dst_buf;
524 struct rte_mbuf *dst_buf = op->m_dst;
526 while (remaining_data > 0) {
527 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
530 if (remaining_data < dst_buf->data_len) {
531 memcpy(dst_data, src_data, remaining_data);
534 memcpy(dst_data, src_data, dst_buf->data_len);
535 remaining_data -= dst_buf->data_len;
536 src_data += dst_buf->data_len;
537 dst_buf = dst_buf->next;
542 op->consumed = stream.total_in;
543 op->produced = stream.total_out;
544 op->status = RTE_COMP_OP_STATUS_SUCCESS;
546 inflateReset(&stream);
/* Build a scatter-gather mbuf chain of total_data_size bytes on head_buf,
 * in SMALL_SEG_SIZE pieces from small_mbuf_pool (last segment may come from
 * large_mbuf_pool if the remainder exceeds a small segment). If test_buf is
 * non-NULL its bytes are copied into the chain; NULL just reserves space.
 * limit_segs_in_sgl caps the number of segments (0 = no cap). */
556 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
557 uint32_t total_data_size,
558 struct rte_mempool *small_mbuf_pool,
559 struct rte_mempool *large_mbuf_pool,
560 uint8_t limit_segs_in_sgl)
562 uint32_t remaining_data = total_data_size;
563 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
564 struct rte_mempool *pool;
565 struct rte_mbuf *next_seg;
568 const char *data_ptr = test_buf;
/* When capped, reserve one slot for the oversized final segment below
 * (hence the -1). */
572 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
573 num_remaining_segs = limit_segs_in_sgl - 1;
576 * Allocate data in the first segment (header) and
577 * copy data if test buffer is provided
579 if (remaining_data < SMALL_SEG_SIZE)
580 data_size = remaining_data;
582 data_size = SMALL_SEG_SIZE;
583 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
584 if (buf_ptr == NULL) {
586 "Not enough space in the 1st buffer\n");
590 if (data_ptr != NULL) {
591 /* Copy characters without NULL terminator */
/* strncpy is deliberate here: exactly data_size bytes, no forced NUL. */
592 strncpy(buf_ptr, data_ptr, data_size);
593 data_ptr += data_size;
595 remaining_data -= data_size;
596 num_remaining_segs--;
599 * Allocate the rest of the segments,
600 * copy the rest of the data and chain the segments.
602 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment takes whatever is left; use the large pool if the
 * remainder (due to the seg cap) no longer fits a small segment. */
604 if (i == (num_remaining_segs - 1)) {
606 if (remaining_data > SMALL_SEG_SIZE)
607 pool = large_mbuf_pool;
609 pool = small_mbuf_pool;
610 data_size = remaining_data;
612 data_size = SMALL_SEG_SIZE;
613 pool = small_mbuf_pool;
616 next_seg = rte_pktmbuf_alloc(pool);
617 if (next_seg == NULL) {
619 "New segment could not be allocated "
620 "from the mempool\n");
623 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
624 if (buf_ptr == NULL) {
626 "Not enough space in the buffer\n");
627 rte_pktmbuf_free(next_seg);
630 if (data_ptr != NULL) {
631 /* Copy characters without NULL terminator */
632 strncpy(buf_ptr, data_ptr, data_size);
633 data_ptr += data_size;
635 remaining_data -= data_size;
637 ret = rte_pktmbuf_chain(head_buf, next_seg);
639 rte_pktmbuf_free(next_seg);
641 "Segment could not chained\n");
650 * Compresses and decompresses buffer with compressdev API and Zlib API
/* Core round-trip driver: compress every test buffer, then decompress it,
 * and compare the result against the original. Either stage can run through
 * zlib directly (reference) or through the compressdev PMD, selected by
 * zlib_dir, so each direction of the PMD is validated independently.
 * Also cross-checks checksums when the xform enables them. */
653 test_deflate_comp_decomp(const char * const test_bufs[],
654 unsigned int num_bufs,
656 struct rte_comp_xform *compress_xforms[],
657 struct rte_comp_xform *decompress_xforms[],
658 unsigned int num_xforms,
659 enum rte_comp_op_type state,
661 enum zlib_direction zlib_dir)
663 struct comp_testsuite_params *ts_params = &testsuite_params;
666 struct rte_mbuf *uncomp_bufs[num_bufs];
667 struct rte_mbuf *comp_bufs[num_bufs];
668 struct rte_comp_op *ops[num_bufs];
669 struct rte_comp_op *ops_processed[num_bufs];
670 void *priv_xforms[num_bufs];
671 uint16_t num_enqd, num_deqd, num_total_deqd;
672 uint16_t num_priv_xforms = 0;
673 unsigned int deqd_retries = 0;
674 struct priv_op_data *priv_data;
677 struct rte_mempool *buf_pool;
679 const struct rte_compressdev_capabilities *capa =
680 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
681 char *contig_buf = NULL;
682 uint64_t compress_checksum[num_bufs];
684 /* Initialize all arrays to NULL */
/* Zeroed up-front so the common error-exit path can free unconditionally. */
685 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
686 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
687 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
688 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
689 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* SGL tests draw head mbufs from the small pool, flat tests from the
 * large pool (selection condition not visible in this view). */
692 buf_pool = ts_params->small_mbuf_pool;
694 buf_pool = ts_params->large_mbuf_pool;
696 /* Prepare the source mbufs with the data */
697 ret = rte_pktmbuf_alloc_bulk(buf_pool,
698 uncomp_bufs, num_bufs);
701 "Source mbufs could not be allocated "
702 "from the mempool\n");
/* SGL path: chain segments and copy the test string in. */
707 for (i = 0; i < num_bufs; i++) {
708 data_size = strlen(test_bufs[i]) + 1;
709 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
711 ts_params->small_mbuf_pool,
712 ts_params->large_mbuf_pool,
/* Flat path: single append + copy (NUL included via data_size). */
717 for (i = 0; i < num_bufs; i++) {
718 data_size = strlen(test_bufs[i]) + 1;
719 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
720 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
724 /* Prepare the destination mbufs */
725 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
728 "Destination mbufs could not be allocated "
729 "from the mempool\n");
/* Destination sized with 30% headroom for incompressible input. */
734 for (i = 0; i < num_bufs; i++) {
735 data_size = strlen(test_bufs[i]) *
736 COMPRESS_BUF_SIZE_RATIO;
737 if (prepare_sgl_bufs(NULL, comp_bufs[i],
739 ts_params->small_mbuf_pool,
740 ts_params->large_mbuf_pool,
746 for (i = 0; i < num_bufs; i++) {
747 data_size = strlen(test_bufs[i]) *
748 COMPRESS_BUF_SIZE_RATIO;
749 rte_pktmbuf_append(comp_bufs[i], data_size);
753 /* Build the compression operations */
754 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
757 "Compress operations could not be allocated "
758 "from the mempool\n");
763 for (i = 0; i < num_bufs; i++) {
764 ops[i]->m_src = uncomp_bufs[i];
765 ops[i]->m_dst = comp_bufs[i];
766 ops[i]->src.offset = 0;
767 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
768 ops[i]->dst.offset = 0;
769 if (state == RTE_COMP_OP_STATELESS) {
770 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
773 "Stateful operations are not supported "
774 "in these tests yet\n");
777 ops[i]->input_chksum = 0;
779 * Store original operation index in private data,
780 * since ordering does not have to be maintained,
781 * when dequeueing from compressdev, so a comparison
782 * at the end of the test can be done.
784 priv_data = (struct priv_op_data *) (ops[i] + 1);
785 priv_data->orig_idx = i;
788 /* Compress data (either with Zlib API or compressdev API */
789 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
790 for (i = 0; i < num_bufs; i++) {
791 const struct rte_comp_xform *compress_xform =
792 compress_xforms[i % num_xforms];
793 ret = compress_zlib(ops[i], compress_xform,
/* zlib path is synchronous: processed op == submitted op. */
798 ops_processed[i] = ops[i];
801 /* Create compress private xform data */
802 for (i = 0; i < num_xforms; i++) {
803 ret = rte_compressdev_private_xform_create(0,
804 (const struct rte_comp_xform *)compress_xforms[i],
808 "Compression private xform "
809 "could not be created\n");
815 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
816 /* Attach shareable private xform data to ops */
817 for (i = 0; i < num_bufs; i++)
818 ops[i]->private_xform = priv_xforms[i % num_xforms];
/* Non-shareable xforms: each op needs its own private xform instance. */
820 /* Create rest of the private xforms for the other ops */
821 for (i = num_xforms; i < num_bufs; i++) {
822 ret = rte_compressdev_private_xform_create(0,
823 compress_xforms[i % num_xforms],
827 "Compression private xform "
828 "could not be created\n");
834 /* Attach non shareable private xform data to ops */
835 for (i = 0; i < num_bufs; i++)
836 ops[i]->private_xform = priv_xforms[i];
839 /* Enqueue and dequeue all operations */
840 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
841 if (num_enqd < num_bufs) {
843 "The operations could not be enqueued\n");
850 * If retrying a dequeue call, wait for 10 ms to allow
851 * enough time to the driver to process the operations
853 if (deqd_retries != 0) {
855 * Avoid infinite loop if not all the
856 * operations get out of the device
858 if (deqd_retries == MAX_DEQD_RETRIES) {
860 "Not all operations could be "
864 usleep(DEQUEUE_WAIT_TIME);
866 num_deqd = rte_compressdev_dequeue_burst(0, 0,
867 &ops_processed[num_total_deqd], num_bufs);
868 num_total_deqd += num_deqd;
871 } while (num_total_deqd < num_enqd);
875 /* Free compress private xforms */
876 for (i = 0; i < num_priv_xforms; i++) {
877 rte_compressdev_private_xform_free(0, priv_xforms[i]);
878 priv_xforms[i] = NULL;
883 for (i = 0; i < num_bufs; i++) {
884 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
885 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
886 const struct rte_comp_compress_xform *compress_xform =
887 &compress_xforms[xform_idx]->compress;
888 enum rte_comp_huffman huffman_type =
889 compress_xform->deflate.huffman;
890 char engine[] = "zlib (directly, not PMD)";
/* NOTE(review): "!= A || != B" with A != B is always true, so engine is
 * unconditionally reported as "PMD"; '&&' was almost certainly intended.
 * Affects only the debug log text, not the test verdict. */
891 if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
892 strlcpy(engine, "PMD", sizeof(engine));
894 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
895 " %u bytes (level = %d, huffman = %s)\n",
896 buf_idx[priv_data->orig_idx], engine,
897 ops_processed[i]->consumed, ops_processed[i]->produced,
898 compress_xform->level,
899 huffman_type_strings[huffman_type]);
900 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
901 ops_processed[i]->consumed == 0 ? 0 :
902 (float)ops_processed[i]->produced /
903 ops_processed[i]->consumed * 100);
/* NOTE(review): checksum is stored at dequeue position i, but compared at
 * decompress dequeue position i later — assumes both stages dequeue in the
 * same order; indexing by priv_data->orig_idx would be order-independent. */
904 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
905 compress_checksum[i] = ops_processed[i]->output_chksum;
910 * Check operation status and free source mbufs (destination mbuf and
911 * compress operation information is needed for the decompression stage)
913 for (i = 0; i < num_bufs; i++) {
914 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
916 "Some operations were not successful\n");
919 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
920 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
921 uncomp_bufs[priv_data->orig_idx] = NULL;
924 /* Allocate buffers for decompressed data */
/* uncomp_bufs[] is reused as the decompression destination array. */
925 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
928 "Destination mbufs could not be allocated "
929 "from the mempool\n");
934 for (i = 0; i < num_bufs; i++) {
935 priv_data = (struct priv_op_data *)
936 (ops_processed[i] + 1);
937 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
938 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
940 ts_params->small_mbuf_pool,
941 ts_params->large_mbuf_pool,
947 for (i = 0; i < num_bufs; i++) {
948 priv_data = (struct priv_op_data *)
949 (ops_processed[i] + 1);
950 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
951 rte_pktmbuf_append(uncomp_bufs[i], data_size);
955 /* Build the decompression operations */
956 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
959 "Decompress operations could not be allocated "
960 "from the mempool\n");
964 /* Source buffer is the compressed data from the previous operations */
965 for (i = 0; i < num_bufs; i++) {
966 ops[i]->m_src = ops_processed[i]->m_dst;
967 ops[i]->m_dst = uncomp_bufs[i];
968 ops[i]->src.offset = 0;
970 * Set the length of the compressed data to the
971 * number of bytes that were produced in the previous stage
973 ops[i]->src.length = ops_processed[i]->produced;
974 ops[i]->dst.offset = 0;
975 if (state == RTE_COMP_OP_STATELESS) {
976 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
979 "Stateful operations are not supported "
980 "in these tests yet\n");
983 ops[i]->input_chksum = 0;
985 * Copy private data from previous operations,
986 * to keep the pointer to the original buffer
988 memcpy(ops[i] + 1, ops_processed[i] + 1,
989 sizeof(struct priv_op_data));
993 * Free the previous compress operations,
994 * as they are not needed anymore
996 rte_comp_op_bulk_free(ops_processed, num_bufs);
998 /* Decompress data (either with Zlib API or compressdev API */
999 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1000 for (i = 0; i < num_bufs; i++) {
1001 priv_data = (struct priv_op_data *)(ops[i] + 1);
1002 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1003 const struct rte_comp_xform *decompress_xform =
1004 decompress_xforms[xform_idx];
1006 ret = decompress_zlib(ops[i], decompress_xform);
1010 ops_processed[i] = ops[i];
1013 /* Create decompress private xform data */
1014 for (i = 0; i < num_xforms; i++) {
1015 ret = rte_compressdev_private_xform_create(0,
1016 (const struct rte_comp_xform *)decompress_xforms[i],
1020 "Decompression private xform "
1021 "could not be created\n");
1027 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1028 /* Attach shareable private xform data to ops */
1029 for (i = 0; i < num_bufs; i++) {
1030 priv_data = (struct priv_op_data *)(ops[i] + 1);
1031 uint16_t xform_idx = priv_data->orig_idx %
1033 ops[i]->private_xform = priv_xforms[xform_idx];
1036 /* Create rest of the private xforms for the other ops */
1037 for (i = num_xforms; i < num_bufs; i++) {
1038 ret = rte_compressdev_private_xform_create(0,
1039 decompress_xforms[i % num_xforms],
1043 "Decompression private xform "
1044 "could not be created\n");
1050 /* Attach non shareable private xform data to ops */
1051 for (i = 0; i < num_bufs; i++) {
1052 priv_data = (struct priv_op_data *)(ops[i] + 1);
1053 uint16_t xform_idx = priv_data->orig_idx;
1054 ops[i]->private_xform = priv_xforms[xform_idx];
1058 /* Enqueue and dequeue all operations */
1059 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1060 if (num_enqd < num_bufs) {
1062 "The operations could not be enqueued\n");
1069 * If retrying a dequeue call, wait for 10 ms to allow
1070 * enough time to the driver to process the operations
1072 if (deqd_retries != 0) {
1074 * Avoid infinite loop if not all the
1075 * operations get out of the device
1077 if (deqd_retries == MAX_DEQD_RETRIES) {
1079 "Not all operations could be "
1083 usleep(DEQUEUE_WAIT_TIME);
1085 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1086 &ops_processed[num_total_deqd], num_bufs);
1087 num_total_deqd += num_deqd;
1089 } while (num_total_deqd < num_enqd);
1094 for (i = 0; i < num_bufs; i++) {
1095 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1096 char engine[] = "zlib, (directly, no PMD)";
/* NOTE(review): same always-true "||" as the compress-stage log above;
 * should be '&&'. Debug log text only. */
1097 if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1098 strlcpy(engine, "pmd", sizeof(engine));
1099 RTE_LOG(DEBUG, USER1,
1100 "Buffer %u decompressed by %s from %u to %u bytes\n",
1101 buf_idx[priv_data->orig_idx], engine,
1102 ops_processed[i]->consumed, ops_processed[i]->produced);
1107 * Check operation status and free source mbuf (destination mbuf and
1108 * compress operation information is still needed)
1110 for (i = 0; i < num_bufs; i++) {
1111 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1113 "Some operations were not successful\n");
1116 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1117 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1118 comp_bufs[priv_data->orig_idx] = NULL;
1122 * Compare the original stream with the decompressed stream
1123 * (in size and the data)
1125 for (i = 0; i < num_bufs; i++) {
1126 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1127 const char *buf1 = test_bufs[priv_data->orig_idx];
/* Linearize the (possibly multi-segment) output for comparison. */
1129 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1130 if (contig_buf == NULL) {
1131 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1136 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1137 ops_processed[i]->produced, contig_buf);
1138 if (compare_buffers(buf1, strlen(buf1) + 1,
1139 buf2, ops_processed[i]->produced) < 0)
1142 /* Test checksums */
1143 if (compress_xforms[0]->compress.chksum !=
1144 RTE_COMP_CHECKSUM_NONE) {
1145 if (ops_processed[i]->output_chksum !=
1146 compress_checksum[i]) {
1147 RTE_LOG(ERR, USER1, "The checksums differ\n"
1148 "Compression Checksum: %" PRIu64 "\tDecompression "
1149 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1150 ops_processed[i]->output_chksum);
1155 rte_free(contig_buf);
1162 /* Free resources */
/* Common exit: arrays were zeroed at entry and entries NULLed as they were
 * consumed, so these frees are safe on every path. */
1163 for (i = 0; i < num_bufs; i++) {
1164 rte_pktmbuf_free(uncomp_bufs[i]);
1165 rte_pktmbuf_free(comp_bufs[i]);
1166 rte_comp_op_free(ops[i]);
1167 rte_comp_op_free(ops_processed[i]);
1169 for (i = 0; i < num_priv_xforms; i++) {
1170 if (priv_xforms[i] != NULL)
1171 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1173 rte_free(contig_buf);
/* Round-trip every test buffer with fixed Huffman coding, in both
 * directions (PMD compress + zlib decompress, and vice versa); skipped when
 * the device does not advertise RTE_COMP_FF_HUFFMAN_FIXED. */
1179 test_compressdev_deflate_stateless_fixed(void)
1181 struct comp_testsuite_params *ts_params = &testsuite_params;
1182 const char *test_buffer;
1185 const struct rte_compressdev_capabilities *capab;
1187 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1188 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1190 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1193 struct rte_comp_xform *compress_xform =
1194 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1196 if (compress_xform == NULL) {
1198 "Compress xform could not be created\n");
/* Start from the suite default xform, override only the Huffman type. */
1203 memcpy(compress_xform, ts_params->def_comp_xform,
1204 sizeof(struct rte_comp_xform));
1205 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1207 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1208 test_buffer = compress_test_bufs[i];
1210 /* Compress with compressdev, decompress with Zlib */
1211 if (test_deflate_comp_decomp(&test_buffer, 1,
1214 &ts_params->def_decomp_xform,
1216 RTE_COMP_OP_STATELESS,
1218 ZLIB_DECOMPRESS) < 0) {
1223 /* Compress with Zlib, decompress with compressdev */
1224 if (test_deflate_comp_decomp(&test_buffer, 1,
1227 &ts_params->def_decomp_xform,
1229 RTE_COMP_OP_STATELESS,
1231 ZLIB_COMPRESS) < 0) {
1240 rte_free(compress_xform);
/* Same round-trip as the fixed-Huffman test but with dynamic Huffman
 * coding; skipped when RTE_COMP_FF_HUFFMAN_DYNAMIC is not supported. */
1245 test_compressdev_deflate_stateless_dynamic(void)
1247 struct comp_testsuite_params *ts_params = &testsuite_params;
1248 const char *test_buffer;
1251 struct rte_comp_xform *compress_xform =
1252 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1254 const struct rte_compressdev_capabilities *capab;
1256 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1257 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* NOTE(review): on this skip path compress_xform has already been
 * allocated above — verify the (not-visible) return frees it. */
1259 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1262 if (compress_xform == NULL) {
1264 "Compress xform could not be created\n");
1269 memcpy(compress_xform, ts_params->def_comp_xform,
1270 sizeof(struct rte_comp_xform));
1271 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1273 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1274 test_buffer = compress_test_bufs[i];
1276 /* Compress with compressdev, decompress with Zlib */
1277 if (test_deflate_comp_decomp(&test_buffer, 1,
1280 &ts_params->def_decomp_xform,
1282 RTE_COMP_OP_STATELESS,
1284 ZLIB_DECOMPRESS) < 0) {
1289 /* Compress with Zlib, decompress with compressdev */
1290 if (test_deflate_comp_decomp(&test_buffer, 1,
1293 &ts_params->def_decomp_xform,
1295 RTE_COMP_OP_STATELESS,
1297 ZLIB_COMPRESS) < 0) {
1306 rte_free(compress_xform);
/* Submit all test buffers in a single burst (one op per buffer) using the
 * suite's default xforms, in both PMD directions. */
1311 test_compressdev_deflate_stateless_multi_op(void)
1313 struct comp_testsuite_params *ts_params = &testsuite_params;
1314 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1315 uint16_t buf_idx[num_bufs];
1318 for (i = 0; i < num_bufs; i++)
1321 /* Compress with compressdev, decompress with Zlib */
1322 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1324 &ts_params->def_comp_xform,
1325 &ts_params->def_decomp_xform,
1327 RTE_COMP_OP_STATELESS,
1329 ZLIB_DECOMPRESS) < 0)
1332 /* Compress with Zlib, decompress with compressdev */
1333 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1335 &ts_params->def_comp_xform,
1336 &ts_params->def_decomp_xform,
1338 RTE_COMP_OP_STATELESS,
1343 return TEST_SUCCESS;
/*
 * Stateless DEFLATE sweep over every compression level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX for each canned test buffer,
 * compressing with the compressdev PMD and decompressing with Zlib.
 *
 * NOTE(review): lines are elided in this excerpt (loop increment, failure
 * paths); comments cover only the visible code.
 */
1347 test_compressdev_deflate_stateless_multi_level(void)
1349 struct comp_testsuite_params *ts_params = &testsuite_params;
1350 const char *test_buffer;
1354 struct rte_comp_xform *compress_xform =
1355 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1357 if (compress_xform == NULL) {
1359 "Compress xform could not be created\n");
/* Start from the default compress xform; only the level is varied below. */
1364 memcpy(compress_xform, ts_params->def_comp_xform,
1365 sizeof(struct rte_comp_xform));
1367 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1368 test_buffer = compress_test_bufs[i];
/* Inner sweep across the full supported level range. */
1369 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1371 compress_xform->compress.level = level;
1372 /* Compress with compressdev, decompress with Zlib */
1373 if (test_deflate_comp_decomp(&test_buffer, 1,
1376 &ts_params->def_decomp_xform,
1378 RTE_COMP_OP_STATELESS,
1380 ZLIB_DECOMPRESS) < 0) {
/* Common cleanup: release the locally allocated xform. */
1390 rte_free(compress_xform);
/* Number of distinct xform pairs (and ops) exercised by the multi-xform test. */
1394 #define NUM_XFORMS 3
/*
 * Stateless DEFLATE with several different compress/decompress xform pairs
 * in flight at once: each of the NUM_XFORMS ops gets its own xform pair
 * (differing by compression level) while all ops reuse the same input
 * buffer. Decompression is validated with Zlib.
 *
 * NOTE(review): lines are elided in this excerpt (level increment,
 * mid-function error paths, some test_deflate_comp_decomp arguments);
 * comments cover only the visible code.
 */
1396 test_compressdev_deflate_stateless_multi_xform(void)
1398 struct comp_testsuite_params *ts_params = &testsuite_params;
1399 uint16_t num_bufs = NUM_XFORMS;
1400 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1401 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1402 const char *test_buffers[NUM_XFORMS];
/* Starting level for the per-xform level variation. */
1404 unsigned int level = RTE_COMP_LEVEL_MIN;
1405 uint16_t buf_idx[num_bufs];
1409 /* Create multiple xforms with various levels */
1410 for (i = 0; i < NUM_XFORMS; i++) {
1411 compress_xforms[i] = rte_malloc(NULL,
1412 sizeof(struct rte_comp_xform), 0);
1413 if (compress_xforms[i] == NULL) {
1415 "Compress xform could not be created\n");
/* Each compress xform is the default one with its own level. */
1420 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1421 sizeof(struct rte_comp_xform));
1422 compress_xforms[i]->compress.level = level;
1425 decompress_xforms[i] = rte_malloc(NULL,
1426 sizeof(struct rte_comp_xform), 0);
1427 if (decompress_xforms[i] == NULL) {
1429 "Decompress xform could not be created\n");
1434 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1435 sizeof(struct rte_comp_xform));
1438 for (i = 0; i < NUM_XFORMS; i++) {
1440 /* Use the same buffer in all sessions */
1441 test_buffers[i] = compress_test_bufs[0];
1443 /* Compress with compressdev, decompress with Zlib */
1444 if (test_deflate_comp_decomp(test_buffers, num_bufs,
1449 RTE_COMP_OP_STATELESS,
1451 ZLIB_DECOMPRESS) < 0) {
/* Common cleanup: free every xform allocated above (NULLs are no-ops). */
1458 for (i = 0; i < NUM_XFORMS; i++) {
1459 rte_free(compress_xforms[i]);
1460 rte_free(decompress_xforms[i]);
/*
 * Stateless DEFLATE round-trip using scatter-gather (multi-segment) mbufs
 * for both input and output, in both directions against Zlib. Skipped when
 * device 0 does not advertise RTE_COMP_FF_OOP_SGL_IN_SGL_OUT.
 *
 * NOTE(review): lines are elided in this excerpt (skip/failure paths, some
 * call arguments — presumably the SGL buffer-type flag); comments cover
 * only the visible code.
 */
1467 test_compressdev_deflate_stateless_sgl(void)
1469 struct comp_testsuite_params *ts_params = &testsuite_params;
1471 const char *test_buffer;
1472 const struct rte_compressdev_capabilities *capab;
1474 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1475 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* SGL-in/SGL-out unsupported -> bail out (skip path elided here). */
1477 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1480 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1481 test_buffer = compress_test_bufs[i];
1482 /* Compress with compressdev, decompress with Zlib */
1483 if (test_deflate_comp_decomp(&test_buffer, 1,
1485 &ts_params->def_comp_xform,
1486 &ts_params->def_decomp_xform,
1488 RTE_COMP_OP_STATELESS,
1490 ZLIB_DECOMPRESS) < 0)
1493 /* Compress with Zlib, decompress with compressdev */
1494 if (test_deflate_comp_decomp(&test_buffer, 1,
1496 &ts_params->def_comp_xform,
1497 &ts_params->def_decomp_xform,
1499 RTE_COMP_OP_STATELESS,
1505 return TEST_SUCCESS;
/*
 * Stateless DEFLATE checksum verification. For each checksum flavor the
 * driver advertises (CRC32, Adler32, combined CRC32+Adler32) the test
 * configures matching compress/decompress xforms and round-trips every
 * canned buffer, comparing the driver-computed checksum against Zlib's
 * where applicable. Skipped entirely when no checksum feature is supported.
 *
 * NOTE(review): lines are elided in this excerpt (skip/failure returns,
 * several test_deflate_comp_decomp arguments, loop-closing braces);
 * comments cover only the visible code.
 */
1510 test_compressdev_deflate_stateless_checksum(void)
1512 struct comp_testsuite_params *ts_params = &testsuite_params;
1513 const char *test_buffer;
1516 const struct rte_compressdev_capabilities *capab;
1518 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1519 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1521 /* Check if driver supports any checksum */
1522 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1523 (capab->comp_feature_flags &
1524 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1525 (capab->comp_feature_flags &
1526 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
/* Local xform copies so the checksum field can be mutated per sub-test. */
1529 struct rte_comp_xform *compress_xform =
1530 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1531 if (compress_xform == NULL) {
1532 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1537 memcpy(compress_xform, ts_params->def_comp_xform,
1538 sizeof(struct rte_comp_xform));
1540 struct rte_comp_xform *decompress_xform =
1541 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1542 if (decompress_xform == NULL) {
1543 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
/* Avoid leaking the first allocation on the second one's failure. */
1544 rte_free(compress_xform);
1549 memcpy(decompress_xform, ts_params->def_decomp_xform,
1550 sizeof(struct rte_comp_xform));
1552 /* Check if driver supports crc32 checksum and test */
1553 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1554 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1555 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1557 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1558 test_buffer = compress_test_bufs[i];
1560 /* Generate zlib checksum and test against selected
1561 * drivers decompression checksum
1563 if (test_deflate_comp_decomp(&test_buffer, 1,
1568 RTE_COMP_OP_STATELESS,
1570 ZLIB_COMPRESS) < 0) {
1575 /* Generate compression and decompression
1576 * checksum of selected driver
1578 if (test_deflate_comp_decomp(&test_buffer, 1,
1583 RTE_COMP_OP_STATELESS,
1592 /* Check if driver supports adler32 checksum and test */
1593 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1594 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1595 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1597 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1598 test_buffer = compress_test_bufs[i];
1600 /* Generate zlib checksum and test against selected
1601 * drivers decompression checksum
1603 if (test_deflate_comp_decomp(&test_buffer, 1,
1608 RTE_COMP_OP_STATELESS,
1610 ZLIB_COMPRESS) < 0) {
1614 /* Generate compression and decompression
1615 * checksum of selected driver
1617 if (test_deflate_comp_decomp(&test_buffer, 1,
1622 RTE_COMP_OP_STATELESS,
1631 /* Check if driver supports combined crc and adler checksum and test */
1632 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1633 compress_xform->compress.chksum =
1634 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1635 decompress_xform->decompress.chksum =
1636 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1638 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1639 test_buffer = compress_test_bufs[i];
1641 /* Generate compression and decompression
1642 * checksum of selected driver
1644 if (test_deflate_comp_decomp(&test_buffer, 1,
1649 RTE_COMP_OP_STATELESS,
/* Common cleanup: release both locally allocated xforms. */
1661 rte_free(compress_xform);
1662 rte_free(decompress_xform);
/*
 * Test-suite descriptor wiring every stateless DEFLATE test above into the
 * DPDK unit-test framework. testsuite_setup/teardown run once per suite;
 * generic_ut_setup/teardown bracket each individual case (both defined
 * elsewhere in this file).
 */
1666 static struct unit_test_suite compressdev_testsuite = {
1667 .suite_name = "compressdev unit test suite",
1668 .setup = testsuite_setup,
1669 .teardown = testsuite_teardown,
1670 .unit_test_cases = {
/* Negative-path case needs no per-case device setup. */
1671 TEST_CASE_ST(NULL, NULL,
1672 test_compressdev_invalid_configuration),
1673 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1674 test_compressdev_deflate_stateless_fixed),
1675 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1676 test_compressdev_deflate_stateless_dynamic),
1677 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1678 test_compressdev_deflate_stateless_multi_op),
1679 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1680 test_compressdev_deflate_stateless_multi_level),
1681 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1682 test_compressdev_deflate_stateless_multi_xform),
1683 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1684 test_compressdev_deflate_stateless_sgl),
1685 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1686 test_compressdev_deflate_stateless_checksum),
1687 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point invoked by the test command: run the whole suite above. */
1692 test_compressdev(void)
1694 return unit_test_suite_runner(&compressdev_testsuite);
/* Expose the suite to the dpdk-test shell as "compressdev_autotest". */
1697 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);