1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
12 #include <rte_compressdev.h>
13 #include <rte_string_fns.h>
15 #include "test_compressdev_test_buffer.h"
18 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
20 #define DEFAULT_WINDOW_SIZE 15
21 #define DEFAULT_MEM_LEVEL 8
22 #define MAX_DEQD_RETRIES 10
23 #define DEQUEUE_WAIT_TIME 10000
26 * 30% extra size for compressed data compared to original data,
27 * in case data size cannot be reduced and it is actually bigger
28 * due to the compress block headers
30 #define COMPRESS_BUF_SIZE_RATIO 1.3
31 #define NUM_LARGE_MBUFS 16
32 #define SMALL_SEG_SIZE 256
35 #define NUM_MAX_XFORMS 16
36 #define NUM_MAX_INFLIGHT_OPS 128
39 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
40 #define ZLIB_HEADER_SIZE 2
41 #define ZLIB_TRAILER_SIZE 4
42 #define GZIP_HEADER_SIZE 10
43 #define GZIP_TRAILER_SIZE 8
45 #define OUT_OF_SPACE_BUF 1
48 huffman_type_strings[] = {
49 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
50 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
51 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
62 LB_BOTH = 0, /* both input and output are linear*/
63 SGL_BOTH, /* both input and output are chained */
64 SGL_TO_LB, /* input buffer is chained */
65 LB_TO_SGL /* output buffer is chained */
/* Resources shared by the whole compressdev test suite (created in
 * testsuite_setup(), released in testsuite_teardown()).
 */
72 struct comp_testsuite_params {
73 struct rte_mempool *large_mbuf_pool; /* mbufs big enough for any whole test buffer */
74 struct rte_mempool *small_mbuf_pool; /* small mbufs used to build SGL chains */
75 struct rte_mempool *op_pool; /* pool of rte_comp_op operations */
76 struct rte_comp_xform *def_comp_xform; /* default compression xform */
77 struct rte_comp_xform *def_decomp_xform; /* default decompression xform */
/* Input data shared across a group of test cases: the test strings and the
 * compress/decompress xforms to cycle over them.
 */
80 struct interim_data_params {
81 const char * const *test_bufs; /* array of NUL-terminated test strings */
82 unsigned int num_bufs; /* number of entries in test_bufs */
84 struct rte_comp_xform **compress_xforms; /* compression xforms, applied round-robin */
85 struct rte_comp_xform **decompress_xforms; /* matching decompression xforms */
86 unsigned int num_xforms; /* entries in each xform array */
/* Per-test-case knobs passed to test_deflate_comp_decomp(). */
89 struct test_data_params {
90 enum rte_comp_op_type state; /* stateless vs stateful operation */
91 enum varied_buff buff_type; /* linear/chained layout of src and dst mbufs */
92 enum zlib_direction zlib_dir; /* which stage(s) are done directly with zlib */
93 unsigned int out_of_space; /* 1 = deliberately undersize the destination */
96 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite-level teardown: warn about any mbufs/ops a test leaked, then
 * release the pools and default xforms allocated in testsuite_setup().
 */
99 testsuite_teardown(void)
101 struct comp_testsuite_params *ts_params = &testsuite_params;
/* A non-zero in-use count here means some test failed to free its objects. */
103 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
104 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
105 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
106 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
107 if (rte_mempool_in_use_count(ts_params->op_pool))
108 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() accept NULL, so this is safe on partial setup. */
110 rte_mempool_free(ts_params->large_mbuf_pool);
111 rte_mempool_free(ts_params->small_mbuf_pool);
112 rte_mempool_free(ts_params->op_pool);
113 rte_free(ts_params->def_comp_xform);
114 rte_free(ts_params->def_decomp_xform);
/*
 * Suite-level setup: requires at least one compressdev; creates the large
 * and small mbuf pools, the op pool, and the default compress/decompress
 * xforms used by most test cases. On failure it falls through to
 * testsuite_teardown() to release whatever was already allocated.
 */
118 testsuite_setup(void)
120 struct comp_testsuite_params *ts_params = &testsuite_params;
121 uint32_t max_buf_size = 0;
124 if (rte_compressdev_count() == 0) {
125 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
129 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
130 rte_compressdev_name_get(0));
/* Size the large pool from the longest test string (+1 for the NUL). */
132 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
133 max_buf_size = RTE_MAX(max_buf_size,
134 strlen(compress_test_bufs[i]) + 1);
137 * Buffers to be used in compression and decompression.
138 * Since decompressed data might be larger than
139 * compressed data (due to block header),
140 * buffers should be big enough for both cases.
142 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
143 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
146 max_buf_size + RTE_PKTMBUF_HEADROOM,
148 if (ts_params->large_mbuf_pool == NULL) {
149 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
153 /* Create mempool with smaller buffers for SGL testing */
154 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
155 NUM_LARGE_MBUFS * MAX_SEGS,
157 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
159 if (ts_params->small_mbuf_pool == NULL) {
160 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* Ops carry a struct priv_op_data trailer (stores the original buffer index). */
164 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
165 0, sizeof(struct priv_op_data),
167 if (ts_params->op_pool == NULL) {
168 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
172 ts_params->def_comp_xform =
173 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
174 if (ts_params->def_comp_xform == NULL) {
176 "Default compress xform could not be created\n");
179 ts_params->def_decomp_xform =
180 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
181 if (ts_params->def_decomp_xform == NULL) {
183 "Default decompress xform could not be created\n");
187 /* Initializes default values for compress/decompress xforms */
188 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): the trailing ',' below chains this with the next assignment
 * via the comma operator; it works, but ';' was almost certainly intended.
 */
189 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
190 ts_params->def_comp_xform->compress.deflate.huffman =
191 RTE_COMP_HUFFMAN_DEFAULT;
192 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
193 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
194 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
196 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same comma-operator pattern as above. */
197 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
198 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
199 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: undo partial allocations. */
204 testsuite_teardown();
/*
 * Per-test setup: configure device 0 with a single queue pair,
 * set up the queue pair, and start the device.
 */
210 generic_ut_setup(void)
212 /* Configure compressdev (one device, one queue pair) */
213 struct rte_compressdev_config config = {
214 .socket_id = rte_socket_id(),
216 .max_nb_priv_xforms = NUM_MAX_XFORMS,
220 if (rte_compressdev_configure(0, &config) < 0) {
221 RTE_LOG(ERR, USER1, "Device configuration failed\n");
225 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
226 rte_socket_id()) < 0) {
227 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
231 if (rte_compressdev_start(0) < 0) {
232 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0 (mirrors generic_ut_setup()). */
240 generic_ut_teardown(void)
242 rte_compressdev_stop(0);
243 if (rte_compressdev_close(0) < 0)
244 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: rte_compressdev_configure()/queue_pair_setup() must
 * reject configurations with zero or too many queue pairs.
 */
248 test_compressdev_invalid_configuration(void)
250 struct rte_compressdev_config invalid_config;
251 struct rte_compressdev_config valid_config = {
252 .socket_id = rte_socket_id(),
254 .max_nb_priv_xforms = NUM_MAX_XFORMS,
257 struct rte_compressdev_info dev_info;
259 /* Invalid configuration with 0 queue pairs */
260 memcpy(&invalid_config, &valid_config,
261 sizeof(struct rte_compressdev_config));
262 invalid_config.nb_queue_pairs = 0;
264 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
265 "Device configuration was successful "
266 "with no queue pairs (invalid)\n");
269 * Invalid configuration with too many queue pairs
270 * (if there is an actual maximum number of queue pairs)
272 rte_compressdev_info_get(0, &dev_info);
/* max_nb_queue_pairs == 0 means "no limit", so the check is skipped then. */
273 if (dev_info.max_nb_queue_pairs != 0) {
274 memcpy(&invalid_config, &valid_config,
275 sizeof(struct rte_compressdev_config));
276 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
278 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
279 "Device configuration was successful "
280 "with too many queue pairs (invalid)\n");
283 /* Invalid queue pair setup, with no number of queue pairs set */
284 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
285 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
286 "Queue pair setup was successful "
287 "with no queue pairs set (invalid)\n");
/*
 * Compare two buffers for identical length and content.
 * Logs the reason on mismatch; used to verify decompressed output
 * against the original test string.
 */
293 compare_buffers(const char *buffer1, uint32_t buffer1_len,
294 const char *buffer2, uint32_t buffer2_len)
296 if (buffer1_len != buffer2_len) {
297 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
301 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
302 RTE_LOG(ERR, USER1, "Buffers are different\n");
310 * Maps compressdev and Zlib flush flags
/*
 * Map an rte_comp flush flag onto the corresponding zlib flush value
 * (the returned zlib constants are on lines elided from this chunk).
 */
313 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
316 case RTE_COMP_FLUSH_NONE:
318 case RTE_COMP_FLUSH_SYNC:
320 case RTE_COMP_FLUSH_FULL:
322 case RTE_COMP_FLUSH_FINAL:
325 * There should be only the values above,
326 * so this should never happen
/*
 * Reference compression path: compress op->m_src into op->m_dst using zlib
 * directly (no PMD), honouring the huffman type, checksum and level from
 * @xform. Segmented (SGL) mbufs are staged through temporary linear
 * buffers. On success fills op->consumed/produced/status/output_chksum.
 */
334 compress_zlib(struct rte_comp_op *op,
335 const struct rte_comp_xform *xform, int mem_level)
339 int strategy, window_bits, comp_level;
340 int ret = TEST_FAILED;
341 uint8_t *single_src_buf = NULL;
342 uint8_t *single_dst_buf = NULL;
344 /* initialize zlib stream */
345 stream.zalloc = Z_NULL;
346 stream.zfree = Z_NULL;
347 stream.opaque = Z_NULL;
/* Fixed huffman maps to Z_FIXED (elided line); otherwise default strategy. */
349 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
352 strategy = Z_DEFAULT_STRATEGY;
355 * Window bits is the base two logarithm of the window size (in bytes).
356 * When doing raw DEFLATE, this number will be negative.
358 window_bits = -(xform->compress.window_size);
/* ADLER32 branch body is elided here — presumably it selects the zlib
 * (positive) window bits, mirroring the CRC32 case below; confirm upstream.
 */
359 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
361 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
362 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
364 comp_level = xform->compress.level;
366 if (comp_level != RTE_COMP_LEVEL_NONE)
367 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
368 window_bits, mem_level, strategy);
370 ret = deflateInit(&stream, Z_NO_COMPRESSION);
373 printf("Zlib deflate could not be initialized\n");
377 /* Assuming stateless operation */
/* Segmented source: flatten it into a temporary contiguous buffer. */
379 if (op->m_src->nb_segs > 1) {
380 single_src_buf = rte_malloc(NULL,
381 rte_pktmbuf_pkt_len(op->m_src), 0);
382 if (single_src_buf == NULL) {
383 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
387 if (rte_pktmbuf_read(op->m_src, op->src.offset,
388 rte_pktmbuf_pkt_len(op->m_src) -
390 single_src_buf) == NULL) {
392 "Buffer could not be read entirely\n");
396 stream.avail_in = op->src.length;
397 stream.next_in = single_src_buf;
400 stream.avail_in = op->src.length;
401 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Segmented destination: deflate into a temporary linear buffer, then
 * scatter the result into the chain after compression completes.
 */
405 if (op->m_dst->nb_segs > 1) {
407 single_dst_buf = rte_malloc(NULL,
408 rte_pktmbuf_pkt_len(op->m_dst), 0);
409 if (single_dst_buf == NULL) {
411 "Buffer could not be allocated\n");
415 stream.avail_out = op->m_dst->pkt_len;
416 stream.next_out = single_dst_buf;
418 } else {/* linear output */
/* Linear case: only the first segment's data_len is offered to zlib. */
419 stream.avail_out = op->m_dst->data_len;
420 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
424 /* Stateless operation, all buffer will be compressed in one go */
425 zlib_flush = map_zlib_flush_flag(op->flush_flag);
426 ret = deflate(&stream, zlib_flush);
428 if (stream.avail_in != 0) {
429 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
433 if (ret != Z_STREAM_END)
436 /* Copy data to destination SGL */
437 if (op->m_dst->nb_segs > 1) {
438 uint32_t remaining_data = stream.total_out;
439 uint8_t *src_data = single_dst_buf;
440 struct rte_mbuf *dst_buf = op->m_dst;
442 while (remaining_data > 0) {
443 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
444 uint8_t *, op->dst.offset);
/* Last (partial) segment gets only the remainder. */
446 if (remaining_data < dst_buf->data_len) {
447 memcpy(dst_data, src_data, remaining_data);
450 memcpy(dst_data, src_data, dst_buf->data_len);
451 remaining_data -= dst_buf->data_len;
452 src_data += dst_buf->data_len;
453 dst_buf = dst_buf->next;
458 op->consumed = stream.total_in;
/* Strip the zlib/gzip framing so 'produced' reflects raw deflate data,
 * matching what a PMD doing raw DEFLATE would report.
 */
459 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
460 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
461 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
462 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
464 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
465 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
466 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
467 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
470 op->produced = stream.total_out;
472 op->status = RTE_COMP_OP_STATUS_SUCCESS;
473 op->output_chksum = stream.adler;
475 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so both cleanup calls are always safe. */
480 rte_free(single_src_buf);
481 rte_free(single_dst_buf);
/*
 * Reference decompression path: inflate op->m_src into op->m_dst using
 * zlib directly (no PMD). Segmented source mbufs are flattened into a
 * temporary buffer first. On success fills op->consumed/produced/status.
 */
487 decompress_zlib(struct rte_comp_op *op,
488 const struct rte_comp_xform *xform)
493 int ret = TEST_FAILED;
494 uint8_t *single_src_buf = NULL;
495 uint8_t *single_dst_buf = NULL;
497 /* initialize zlib stream */
498 stream.zalloc = Z_NULL;
499 stream.zfree = Z_NULL;
500 stream.opaque = Z_NULL;
503 * Window bits is the base two logarithm of the window size (in bytes).
504 * When doing raw DEFLATE, this number will be negative.
506 window_bits = -(xform->decompress.window_size);
507 ret = inflateInit2(&stream, window_bits);
510 printf("Zlib deflate could not be initialized\n");
514 /* Assuming stateless operation */
/* Segmented source: both the staging src and dst buffers are allocated
 * under this branch, keyed off m_src segmentation only.
 */
516 if (op->m_src->nb_segs > 1) {
517 single_src_buf = rte_malloc(NULL,
518 rte_pktmbuf_pkt_len(op->m_src), 0);
519 if (single_src_buf == NULL) {
520 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
523 single_dst_buf = rte_malloc(NULL,
524 rte_pktmbuf_pkt_len(op->m_dst), 0);
525 if (single_dst_buf == NULL) {
526 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
529 if (rte_pktmbuf_read(op->m_src, 0,
530 rte_pktmbuf_pkt_len(op->m_src),
531 single_src_buf) == NULL) {
533 "Buffer could not be read entirely\n");
537 stream.avail_in = op->src.length;
538 stream.next_in = single_src_buf;
539 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
540 stream.next_out = single_dst_buf;
543 stream.avail_in = op->src.length;
544 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
/* NOTE(review): linear path offers only the first dst segment's data_len;
 * a segmented m_dst with a linear m_src is not handled here — confirm
 * callers never pair LB src with SGL dst on this path.
 */
545 stream.avail_out = op->m_dst->data_len;
546 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
549 /* Stateless operation, all buffer will be compressed in one go */
550 zlib_flush = map_zlib_flush_flag(op->flush_flag);
551 ret = inflate(&stream, zlib_flush);
553 if (stream.avail_in != 0) {
554 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
558 if (ret != Z_STREAM_END)
/* NOTE(review): this scatter loop writes op->m_dst but is gated on
 * op->m_src->nb_segs (consistent with the staging-buffer allocation
 * above, which is also keyed off m_src) — keying both off m_dst would
 * be the more natural condition; verify against upstream history.
 */
561 if (op->m_src->nb_segs > 1) {
562 uint32_t remaining_data = stream.total_out;
563 uint8_t *src_data = single_dst_buf;
564 struct rte_mbuf *dst_buf = op->m_dst;
566 while (remaining_data > 0) {
567 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
570 if (remaining_data < dst_buf->data_len) {
571 memcpy(dst_data, src_data, remaining_data);
574 memcpy(dst_data, src_data, dst_buf->data_len);
575 remaining_data -= dst_buf->data_len;
576 src_data += dst_buf->data_len;
577 dst_buf = dst_buf->next;
582 op->consumed = stream.total_in;
583 op->produced = stream.total_out;
584 op->status = RTE_COMP_OP_STATUS_SUCCESS;
586 inflateReset(&stream);
/*
 * Build a chained (SGL) mbuf of @total_data_size bytes rooted at
 * @head_buf, in SMALL_SEG_SIZE segments, optionally copying @test_buf
 * into it (pass NULL for an empty destination chain). The last segment
 * comes from @large_mbuf_pool when the segment count was capped by
 * @limit_segs_in_sgl and the remainder exceeds a small segment.
 */
596 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
597 uint32_t total_data_size,
598 struct rte_mempool *small_mbuf_pool,
599 struct rte_mempool *large_mbuf_pool,
600 uint8_t limit_segs_in_sgl)
602 uint32_t remaining_data = total_data_size;
603 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
604 struct rte_mempool *pool;
605 struct rte_mbuf *next_seg;
608 const char *data_ptr = test_buf;
/* Cap the chain length; "- 1" accounts for head_buf itself being
 * one of the limit_segs_in_sgl segments.
 */
612 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
613 num_remaining_segs = limit_segs_in_sgl - 1;
616 * Allocate data in the first segment (header) and
617 * copy data if test buffer is provided
619 if (remaining_data < SMALL_SEG_SIZE)
620 data_size = remaining_data;
622 data_size = SMALL_SEG_SIZE;
623 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
624 if (buf_ptr == NULL) {
626 "Not enough space in the 1st buffer\n");
630 if (data_ptr != NULL) {
631 /* Copy characters without NULL terminator */
632 strncpy(buf_ptr, data_ptr, data_size);
633 data_ptr += data_size;
635 remaining_data -= data_size;
636 num_remaining_segs--;
639 * Allocate the rest of the segments,
640 * copy the rest of the data and chain the segments.
642 for (i = 0; i < num_remaining_segs; i++) {
644 if (i == (num_remaining_segs - 1)) {
/* Last segment takes the whole remainder; use a large mbuf if
 * it no longer fits in a small segment (segment count was capped).
 */
646 if (remaining_data > SMALL_SEG_SIZE)
647 pool = large_mbuf_pool;
649 pool = small_mbuf_pool;
650 data_size = remaining_data;
652 data_size = SMALL_SEG_SIZE;
653 pool = small_mbuf_pool;
656 next_seg = rte_pktmbuf_alloc(pool);
657 if (next_seg == NULL) {
659 "New segment could not be allocated "
660 "from the mempool\n");
663 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
664 if (buf_ptr == NULL) {
666 "Not enough space in the buffer\n");
667 rte_pktmbuf_free(next_seg);
670 if (data_ptr != NULL) {
671 /* Copy characters without NULL terminator */
672 strncpy(buf_ptr, data_ptr, data_size);
673 data_ptr += data_size;
675 remaining_data -= data_size;
677 ret = rte_pktmbuf_chain(head_buf, next_seg);
679 rte_pktmbuf_free(next_seg);
/* NOTE(review): message has a typo ("could not [be] chained") — runtime
 * string, left untouched here.
 */
681 "Segment could not chained\n");
690 * Compresses and decompresses buffer with compressdev API and Zlib API
/*
 * Core round-trip test: compresses every buffer in @int_data and then
 * decompresses the result, using either the compressdev PMD or zlib
 * directly for each stage (per @test_data->zlib_dir), finally comparing
 * the output against the original data and the per-stage checksums.
 * Returns a negative value on any failure (via the elided error paths).
 *
 * Fix applied: the two debug-log conditions selecting the "PMD" engine
 * name used '||' between two inequality tests against different enum
 * values, which is a tautology (always true), so the zlib engine name
 * was never printed. They now use '&&'.
 */
693 test_deflate_comp_decomp(const struct interim_data_params *int_data,
694 const struct test_data_params *test_data)
696 struct comp_testsuite_params *ts_params = &testsuite_params;
697 const char * const *test_bufs = int_data->test_bufs;
698 unsigned int num_bufs = int_data->num_bufs;
699 uint16_t *buf_idx = int_data->buf_idx;
700 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
701 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
702 unsigned int num_xforms = int_data->num_xforms;
703 enum rte_comp_op_type state = test_data->state;
704 unsigned int buff_type = test_data->buff_type;
705 unsigned int out_of_space = test_data->out_of_space;
706 enum zlib_direction zlib_dir = test_data->zlib_dir;
709 struct rte_mbuf *uncomp_bufs[num_bufs];
710 struct rte_mbuf *comp_bufs[num_bufs];
711 struct rte_comp_op *ops[num_bufs];
712 struct rte_comp_op *ops_processed[num_bufs];
713 void *priv_xforms[num_bufs];
714 uint16_t num_enqd, num_deqd, num_total_deqd;
715 uint16_t num_priv_xforms = 0;
716 unsigned int deqd_retries = 0;
717 struct priv_op_data *priv_data;
720 struct rte_mempool *buf_pool;
722 /* Compressing with CompressDev */
723 unsigned int oos_zlib_decompress =
724 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
725 /* Decompressing with CompressDev */
726 unsigned int oos_zlib_compress =
727 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
728 const struct rte_compressdev_capabilities *capa =
729 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
730 char *contig_buf = NULL;
731 uint64_t compress_checksum[num_bufs];
733 /* Initialize all arrays to NULL */
734 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
735 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
736 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
737 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
738 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
740 if (buff_type == SGL_BOTH)
741 buf_pool = ts_params->small_mbuf_pool;
743 buf_pool = ts_params->large_mbuf_pool;
745 /* Prepare the source mbufs with the data */
746 ret = rte_pktmbuf_alloc_bulk(buf_pool,
747 uncomp_bufs, num_bufs);
750 "Source mbufs could not be allocated "
751 "from the mempool\n");
755 if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
756 for (i = 0; i < num_bufs; i++) {
757 data_size = strlen(test_bufs[i]) + 1;
758 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
760 ts_params->small_mbuf_pool,
761 ts_params->large_mbuf_pool,
766 for (i = 0; i < num_bufs; i++) {
767 data_size = strlen(test_bufs[i]) + 1;
768 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
769 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
773 /* Prepare the destination mbufs */
774 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
777 "Destination mbufs could not be allocated "
778 "from the mempool\n");
782 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
783 for (i = 0; i < num_bufs; i++) {
784 if (out_of_space == 1 && oos_zlib_decompress)
785 data_size = OUT_OF_SPACE_BUF;
787 (data_size = strlen(test_bufs[i]) *
788 COMPRESS_BUF_SIZE_RATIO);
790 if (prepare_sgl_bufs(NULL, comp_bufs[i],
792 ts_params->small_mbuf_pool,
793 ts_params->large_mbuf_pool,
799 for (i = 0; i < num_bufs; i++) {
800 if (out_of_space == 1 && oos_zlib_decompress)
801 data_size = OUT_OF_SPACE_BUF;
803 (data_size = strlen(test_bufs[i]) *
804 COMPRESS_BUF_SIZE_RATIO);
806 rte_pktmbuf_append(comp_bufs[i], data_size);
810 /* Build the compression operations */
811 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
814 "Compress operations could not be allocated "
815 "from the mempool\n");
820 for (i = 0; i < num_bufs; i++) {
821 ops[i]->m_src = uncomp_bufs[i];
822 ops[i]->m_dst = comp_bufs[i];
823 ops[i]->src.offset = 0;
824 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
825 ops[i]->dst.offset = 0;
826 if (state == RTE_COMP_OP_STATELESS) {
827 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
830 "Stateful operations are not supported "
831 "in these tests yet\n");
834 ops[i]->input_chksum = 0;
836 * Store original operation index in private data,
837 * since ordering does not have to be maintained,
838 * when dequeueing from compressdev, so a comparison
839 * at the end of the test can be done.
841 priv_data = (struct priv_op_data *) (ops[i] + 1);
842 priv_data->orig_idx = i;
845 /* Compress data (either with Zlib API or compressdev API */
846 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
847 for (i = 0; i < num_bufs; i++) {
848 const struct rte_comp_xform *compress_xform =
849 compress_xforms[i % num_xforms];
850 ret = compress_zlib(ops[i], compress_xform,
855 ops_processed[i] = ops[i];
858 /* Create compress private xform data */
859 for (i = 0; i < num_xforms; i++) {
860 ret = rte_compressdev_private_xform_create(0,
861 (const struct rte_comp_xform *)compress_xforms[i],
865 "Compression private xform "
866 "could not be created\n");
872 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
873 /* Attach shareable private xform data to ops */
874 for (i = 0; i < num_bufs; i++)
875 ops[i]->private_xform = priv_xforms[i % num_xforms];
877 /* Create rest of the private xforms for the other ops */
878 for (i = num_xforms; i < num_bufs; i++) {
879 ret = rte_compressdev_private_xform_create(0,
880 compress_xforms[i % num_xforms],
884 "Compression private xform "
885 "could not be created\n");
891 /* Attach non shareable private xform data to ops */
892 for (i = 0; i < num_bufs; i++)
893 ops[i]->private_xform = priv_xforms[i];
896 /* Enqueue and dequeue all operations */
897 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
898 if (num_enqd < num_bufs) {
900 "The operations could not be enqueued\n");
907 * If retrying a dequeue call, wait for 10 ms to allow
908 * enough time to the driver to process the operations
910 if (deqd_retries != 0) {
912 * Avoid infinite loop if not all the
913 * operations get out of the device
915 if (deqd_retries == MAX_DEQD_RETRIES) {
917 "Not all operations could be "
921 usleep(DEQUEUE_WAIT_TIME);
923 num_deqd = rte_compressdev_dequeue_burst(0, 0,
924 &ops_processed[num_total_deqd], num_bufs);
925 num_total_deqd += num_deqd;
928 } while (num_total_deqd < num_enqd);
932 /* Free compress private xforms */
933 for (i = 0; i < num_priv_xforms; i++) {
934 rte_compressdev_private_xform_free(0, priv_xforms[i]);
935 priv_xforms[i] = NULL;
940 for (i = 0; i < num_bufs; i++) {
941 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
942 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
943 const struct rte_comp_compress_xform *compress_xform =
944 &compress_xforms[xform_idx]->compress;
945 enum rte_comp_huffman huffman_type =
946 compress_xform->deflate.huffman;
947 char engine[] = "zlib (directly, not PMD)";
/* Fix: was '||', which is always true for two different enum values,
 * so the engine name was unconditionally overwritten with "PMD".
 */
948 if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
949 strlcpy(engine, "PMD", sizeof(engine));
951 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
952 " %u bytes (level = %d, huffman = %s)\n",
953 buf_idx[priv_data->orig_idx], engine,
954 ops_processed[i]->consumed, ops_processed[i]->produced,
955 compress_xform->level,
956 huffman_type_strings[huffman_type]);
957 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
958 ops_processed[i]->consumed == 0 ? 0 :
959 (float)ops_processed[i]->produced /
960 ops_processed[i]->consumed * 100);
/* NOTE(review): indexed by dequeue position 'i', while the later
 * decompress-stage comparison also uses its own dequeue position —
 * if the two stages dequeue in different orders the checksums of
 * different buffers get compared; confirm against upstream.
 */
961 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
962 compress_checksum[i] = ops_processed[i]->output_chksum;
967 * Check operation status and free source mbufs (destination mbuf and
968 * compress operation information is needed for the decompression stage)
970 for (i = 0; i < num_bufs; i++) {
971 if (out_of_space && oos_zlib_decompress) {
972 if (ops_processed[i]->status !=
973 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
977 "Operation without expected out of "
978 "space status error\n");
984 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
986 "Some operations were not successful\n");
989 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
990 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
991 uncomp_bufs[priv_data->orig_idx] = NULL;
994 if (out_of_space && oos_zlib_decompress) {
999 /* Allocate buffers for decompressed data */
1000 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1003 "Destination mbufs could not be allocated "
1004 "from the mempool\n");
1008 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1009 for (i = 0; i < num_bufs; i++) {
1010 priv_data = (struct priv_op_data *)
1011 (ops_processed[i] + 1);
1012 if (out_of_space == 1 && oos_zlib_compress)
1013 data_size = OUT_OF_SPACE_BUF;
1016 strlen(test_bufs[priv_data->orig_idx]) + 1;
1018 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1020 ts_params->small_mbuf_pool,
1021 ts_params->large_mbuf_pool,
1027 for (i = 0; i < num_bufs; i++) {
1028 priv_data = (struct priv_op_data *)
1029 (ops_processed[i] + 1);
1030 if (out_of_space == 1 && oos_zlib_compress)
1031 data_size = OUT_OF_SPACE_BUF;
1034 strlen(test_bufs[priv_data->orig_idx]) + 1;
1036 rte_pktmbuf_append(uncomp_bufs[i], data_size);
1040 /* Build the decompression operations */
1041 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1044 "Decompress operations could not be allocated "
1045 "from the mempool\n");
1049 /* Source buffer is the compressed data from the previous operations */
1050 for (i = 0; i < num_bufs; i++) {
1051 ops[i]->m_src = ops_processed[i]->m_dst;
1052 ops[i]->m_dst = uncomp_bufs[i];
1053 ops[i]->src.offset = 0;
1055 * Set the length of the compressed data to the
1056 * number of bytes that were produced in the previous stage
1058 ops[i]->src.length = ops_processed[i]->produced;
1059 ops[i]->dst.offset = 0;
1060 if (state == RTE_COMP_OP_STATELESS) {
1061 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1064 "Stateful operations are not supported "
1065 "in these tests yet\n");
1068 ops[i]->input_chksum = 0;
1070 * Copy private data from previous operations,
1071 * to keep the pointer to the original buffer
1073 memcpy(ops[i] + 1, ops_processed[i] + 1,
1074 sizeof(struct priv_op_data));
1078 * Free the previous compress operations,
1079 * as they are not needed anymore
1081 rte_comp_op_bulk_free(ops_processed, num_bufs);
1083 /* Decompress data (either with Zlib API or compressdev API */
1084 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1085 for (i = 0; i < num_bufs; i++) {
1086 priv_data = (struct priv_op_data *)(ops[i] + 1);
1087 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1088 const struct rte_comp_xform *decompress_xform =
1089 decompress_xforms[xform_idx];
1091 ret = decompress_zlib(ops[i], decompress_xform);
1095 ops_processed[i] = ops[i];
1098 /* Create decompress private xform data */
1099 for (i = 0; i < num_xforms; i++) {
1100 ret = rte_compressdev_private_xform_create(0,
1101 (const struct rte_comp_xform *)decompress_xforms[i],
1105 "Decompression private xform "
1106 "could not be created\n");
1112 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1113 /* Attach shareable private xform data to ops */
1114 for (i = 0; i < num_bufs; i++) {
1115 priv_data = (struct priv_op_data *)(ops[i] + 1);
1116 uint16_t xform_idx = priv_data->orig_idx %
1118 ops[i]->private_xform = priv_xforms[xform_idx];
1121 /* Create rest of the private xforms for the other ops */
1122 for (i = num_xforms; i < num_bufs; i++) {
1123 ret = rte_compressdev_private_xform_create(0,
1124 decompress_xforms[i % num_xforms],
1128 "Decompression private xform "
1129 "could not be created\n");
1135 /* Attach non shareable private xform data to ops */
1136 for (i = 0; i < num_bufs; i++) {
1137 priv_data = (struct priv_op_data *)(ops[i] + 1);
1138 uint16_t xform_idx = priv_data->orig_idx;
1139 ops[i]->private_xform = priv_xforms[xform_idx];
1143 /* Enqueue and dequeue all operations */
1144 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1145 if (num_enqd < num_bufs) {
1147 "The operations could not be enqueued\n");
1154 * If retrying a dequeue call, wait for 10 ms to allow
1155 * enough time to the driver to process the operations
1157 if (deqd_retries != 0) {
1159 * Avoid infinite loop if not all the
1160 * operations get out of the device
1162 if (deqd_retries == MAX_DEQD_RETRIES) {
1164 "Not all operations could be "
1168 usleep(DEQUEUE_WAIT_TIME);
1170 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1171 &ops_processed[num_total_deqd], num_bufs);
1172 num_total_deqd += num_deqd;
1174 } while (num_total_deqd < num_enqd);
1179 for (i = 0; i < num_bufs; i++) {
1180 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1181 char engine[] = "zlib, (directly, no PMD)";
/* Fix: was '||' — same always-true tautology as the compress stage. */
1182 if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1183 strlcpy(engine, "pmd", sizeof(engine));
1184 RTE_LOG(DEBUG, USER1,
1185 "Buffer %u decompressed by %s from %u to %u bytes\n",
1186 buf_idx[priv_data->orig_idx], engine,
1187 ops_processed[i]->consumed, ops_processed[i]->produced);
1192 * Check operation status and free source mbuf (destination mbuf and
1193 * compress operation information is still needed)
1195 for (i = 0; i < num_bufs; i++) {
1196 if (out_of_space && oos_zlib_compress) {
1197 if (ops_processed[i]->status !=
1198 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1202 "Operation without expected out of "
1203 "space status error\n");
1209 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1211 "Some operations were not successful\n");
1214 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1215 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1216 comp_bufs[priv_data->orig_idx] = NULL;
1219 if (out_of_space && oos_zlib_compress) {
1225 * Compare the original stream with the decompressed stream
1226 * (in size and the data)
1228 for (i = 0; i < num_bufs; i++) {
1229 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1230 const char *buf1 = test_bufs[priv_data->orig_idx];
1232 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1233 if (contig_buf == NULL) {
1234 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1239 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1240 ops_processed[i]->produced, contig_buf);
1241 if (compare_buffers(buf1, strlen(buf1) + 1,
1242 buf2, ops_processed[i]->produced) < 0)
1245 /* Test checksums */
1246 if (compress_xforms[0]->compress.chksum !=
1247 RTE_COMP_CHECKSUM_NONE) {
1248 if (ops_processed[i]->output_chksum !=
1249 compress_checksum[i]) {
1250 RTE_LOG(ERR, USER1, "The checksums differ\n"
1251 "Compression Checksum: %" PRIu64 "\tDecompression "
1252 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1253 ops_processed[i]->output_chksum);
1258 rte_free(contig_buf);
1265 /* Free resources */
1266 for (i = 0; i < num_bufs; i++) {
1267 rte_pktmbuf_free(uncomp_bufs[i]);
1268 rte_pktmbuf_free(comp_bufs[i]);
1269 rte_comp_op_free(ops[i]);
1270 rte_comp_op_free(ops_processed[i]);
1272 for (i = 0; i < num_priv_xforms; i++) {
1273 if (priv_xforms[i] != NULL)
1274 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1276 rte_free(contig_buf);
/*
 * Test case: stateless DEFLATE with fixed Huffman coding, each buffer
 * round-tripped twice (PMD compress + zlib decompress, then the reverse).
 * Skipped when the device does not advertise RTE_COMP_FF_HUFFMAN_FIXED.
 */
1282 test_compressdev_deflate_stateless_fixed(void)
1284 struct comp_testsuite_params *ts_params = &testsuite_params;
1287 const struct rte_compressdev_capabilities *capab;
1289 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1290 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1292 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1295 struct rte_comp_xform *compress_xform =
1296 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1298 if (compress_xform == NULL) {
1300 "Compress xform could not be created\n");
/* Clone the default compress xform, then force fixed Huffman. */
1305 memcpy(compress_xform, ts_params->def_comp_xform,
1306 sizeof(struct rte_comp_xform));
1307 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1309 struct interim_data_params int_data = {
1314 &ts_params->def_decomp_xform,
1318 struct test_data_params test_data = {
1319 RTE_COMP_OP_STATELESS,
/* Each test string is exercised individually, in both zlib directions. */
1325 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1326 int_data.test_bufs = &compress_test_bufs[i];
1327 int_data.buf_idx = &i;
1329 /* Compress with compressdev, decompress with Zlib */
1330 test_data.zlib_dir = ZLIB_DECOMPRESS;
1331 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1336 /* Compress with Zlib, decompress with compressdev */
1337 test_data.zlib_dir = ZLIB_COMPRESS;
1338 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1347 rte_free(compress_xform);
/*
 * Stateless DEFLATE round-trip test using dynamic Huffman tables; same
 * structure as the fixed-Huffman test, both directions against Zlib.
 * Skipped when RTE_COMP_FF_HUFFMAN_DYNAMIC is not supported.
 *
 * NOTE(review): unlike test_compressdev_deflate_stateless_fixed, the
 * xform is rte_malloc'd *before* the capability check.  If the early
 * return on the unsupported-feature path (not visible in this excerpt)
 * does not free compress_xform, this leaks -- verify against the full
 * source.  (Interior lines are missing from this excerpt; comments
 * cover only what is visible.)
 */
1352 test_compressdev_deflate_stateless_dynamic(void)
1354 	struct comp_testsuite_params *ts_params = &testsuite_params;
1357 	struct rte_comp_xform *compress_xform =
1358 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1360 	const struct rte_compressdev_capabilities *capab;
1362 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1363 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1365 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1368 	if (compress_xform == NULL) {
1370 			"Compress xform could not be created\n");
	/* Clone the default compress xform, then force dynamic Huffman. */
1375 	memcpy(compress_xform, ts_params->def_comp_xform,
1376 			sizeof(struct rte_comp_xform));
1377 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1379 	struct interim_data_params int_data = {
1384 		&ts_params->def_decomp_xform,
1388 	struct test_data_params test_data = {
1389 		RTE_COMP_OP_STATELESS,
	/* Exercise every reference buffer in both directions. */
1395 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1396 		int_data.test_bufs = &compress_test_bufs[i];
1397 		int_data.buf_idx = &i;
1399 		/* Compress with compressdev, decompress with Zlib */
1400 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1401 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1406 		/* Compress with Zlib, decompress with compressdev */
1407 		test_data.zlib_dir = ZLIB_COMPRESS;
1408 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
	/* Common exit: release the locally allocated xform. */
1417 	rte_free(compress_xform);
/*
 * Stateless DEFLATE test submitting ALL reference buffers as one batch
 * of operations (multi-op), using the default compress/decompress
 * xforms, verified against Zlib in both directions.
 * NOTE(review): interior lines (buf_idx initialization body, initializer
 * fields, failure returns) are missing from this excerpt.
 */
1422 test_compressdev_deflate_stateless_multi_op(void)
1424 	struct comp_testsuite_params *ts_params = &testsuite_params;
1425 	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1426 	uint16_t buf_idx[num_bufs];
	/* Presumably fills buf_idx[i] = i -- loop body not visible here. */
1429 	for (i = 0; i < num_bufs; i++)
1432 	struct interim_data_params int_data = {
1436 		&ts_params->def_comp_xform,
1437 		&ts_params->def_decomp_xform,
1441 	struct test_data_params test_data = {
1442 		RTE_COMP_OP_STATELESS,
1448 	/* Compress with compressdev, decompress with Zlib */
1449 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1450 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1453 	/* Compress with Zlib, decompress with compressdev */
1454 	test_data.zlib_dir = ZLIB_COMPRESS;
1455 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1458 	return TEST_SUCCESS;
/*
 * Stateless DEFLATE test sweeping every compression level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX for each reference buffer,
 * verifying each level by decompressing with Zlib.
 * NOTE(review): interior lines (level increment, failure paths, loop
 * closers) are missing from this excerpt.
 */
1462 test_compressdev_deflate_stateless_multi_level(void)
1464 	struct comp_testsuite_params *ts_params = &testsuite_params;
1468 	struct rte_comp_xform *compress_xform =
1469 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1471 	if (compress_xform == NULL) {
1473 			"Compress xform could not be created\n");
	/* Start from the default compress xform; only .level is varied. */
1478 	memcpy(compress_xform, ts_params->def_comp_xform,
1479 			sizeof(struct rte_comp_xform));
1481 	struct interim_data_params int_data = {
1486 		&ts_params->def_decomp_xform,
1490 	struct test_data_params test_data = {
1491 		RTE_COMP_OP_STATELESS,
1497 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1498 		int_data.test_bufs = &compress_test_bufs[i];
1499 		int_data.buf_idx = &i;
		/* Inner sweep over all supported compression levels. */
1501 		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1503 			compress_xform->compress.level = level;
1504 			/* Compress with compressdev, decompress with Zlib */
1505 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1506 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
	/* Common exit: release the locally allocated xform. */
1516 	rte_free(compress_xform);
1520 #define NUM_XFORMS 3
/*
 * Stateless DEFLATE test using NUM_XFORMS distinct xform pairs in one
 * run: each compress xform gets a different level (starting at
 * RTE_COMP_LEVEL_MIN; the increment is on a line not visible here),
 * while all operations reuse the same input buffer.  Verified by
 * decompressing with Zlib.
 * NOTE(review): interior lines (initializer fields, error-path frees,
 * returns) are missing from this excerpt.
 */
1522 test_compressdev_deflate_stateless_multi_xform(void)
1524 	struct comp_testsuite_params *ts_params = &testsuite_params;
1525 	uint16_t num_bufs = NUM_XFORMS;
1526 	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1527 	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1528 	const char *test_buffers[NUM_XFORMS];
1530 	unsigned int level = RTE_COMP_LEVEL_MIN;
1531 	uint16_t buf_idx[num_bufs];
1535 	/* Create multiple xforms with various levels */
1536 	for (i = 0; i < NUM_XFORMS; i++) {
1537 		compress_xforms[i] = rte_malloc(NULL,
1538 				sizeof(struct rte_comp_xform), 0);
1539 		if (compress_xforms[i] == NULL) {
1541 				"Compress xform could not be created\n");
		/* Per-slot clone of the default compress xform with its own level. */
1546 		memcpy(compress_xforms[i], ts_params->def_comp_xform,
1547 				sizeof(struct rte_comp_xform));
1548 		compress_xforms[i]->compress.level = level;
1551 		decompress_xforms[i] = rte_malloc(NULL,
1552 				sizeof(struct rte_comp_xform), 0);
1553 		if (decompress_xforms[i] == NULL) {
1555 				"Decompress xform could not be created\n");
1560 		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1561 				sizeof(struct rte_comp_xform));
1564 	for (i = 0; i < NUM_XFORMS; i++) {
1566 		/* Use the same buffer in all sessions */
1567 		test_buffers[i] = compress_test_bufs[0];
1570 	struct interim_data_params int_data = {
1579 	struct test_data_params test_data = {
1580 		RTE_COMP_OP_STATELESS,
1586 	/* Compress with compressdev, decompress with Zlib */
1587 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
	/* Common exit: free every per-slot xform pair. */
1594 	for (i = 0; i < NUM_XFORMS; i++) {
1595 		rte_free(compress_xforms[i]);
1596 		rte_free(decompress_xforms[i]);
/*
 * Stateless DEFLATE test over scatter-gather (chained-mbuf) buffers.
 * Baseline: SGL input and output (requires RTE_COMP_FF_OOP_SGL_IN_SGL_OUT,
 * checked up front); then, when the device also advertises them,
 * SGL_TO_LB (chained in, linear out) and LB_TO_SGL (linear in, chained
 * out) variants.  Each case runs both directions against Zlib.
 * NOTE(review): interior lines (initializer fields setting the baseline
 * buff_type, failure returns) are missing from this excerpt.
 */
1603 test_compressdev_deflate_stateless_sgl(void)
1605 	struct comp_testsuite_params *ts_params = &testsuite_params;
1607 	const struct rte_compressdev_capabilities *capab;
1609 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1610 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1612 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1615 	struct interim_data_params int_data = {
1619 		&ts_params->def_comp_xform,
1620 		&ts_params->def_decomp_xform,
1624 	struct test_data_params test_data = {
1625 		RTE_COMP_OP_STATELESS,
	/* Baseline SGL-in/SGL-out pass over every reference buffer. */
1631 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1632 		int_data.test_bufs = &compress_test_bufs[i];
1633 		int_data.buf_idx = &i;
1635 		/* Compress with compressdev, decompress with Zlib */
1636 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1637 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1640 		/* Compress with Zlib, decompress with compressdev */
1641 		test_data.zlib_dir = ZLIB_COMPRESS;
1642 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
		/* Optional variant: chained input, linear output. */
1645 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1646 			/* Compress with compressdev, decompress with Zlib */
1647 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1648 			test_data.buff_type = SGL_TO_LB;
1649 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1652 			/* Compress with Zlib, decompress with compressdev */
1653 			test_data.zlib_dir = ZLIB_COMPRESS;
1654 			test_data.buff_type = SGL_TO_LB;
1655 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
		/* Optional variant: linear input, chained output. */
1659 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1660 			/* Compress with compressdev, decompress with Zlib */
1661 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1662 			test_data.buff_type = LB_TO_SGL;
1663 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1666 			/* Compress with Zlib, decompress with compressdev */
1667 			test_data.zlib_dir = ZLIB_COMPRESS;
1668 			test_data.buff_type = LB_TO_SGL;
1669 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1676 	return TEST_SUCCESS;
/*
 * Stateless DEFLATE checksum test.  Skipped unless the device supports
 * at least one of CRC32, Adler32, or combined CRC32+Adler32 checksums.
 * For each supported checksum type it runs the reference buffers two
 * ways: ZLIB_COMPRESS (Zlib generates the checksum, driver's decompress
 * checksum is compared against it) and ZLIB_NONE (driver generates both
 * compression and decompression checksums).  The combined
 * CRC32_ADLER32 case runs only the ZLIB_NONE variant, since Zlib cannot
 * produce the combined checksum.
 * NOTE(review): this excerpt omits interior lines (initializer fields,
 * failure-path frees/returns); comments cover only visible code.
 */
1681 test_compressdev_deflate_stateless_checksum(void)
1683 	struct comp_testsuite_params *ts_params = &testsuite_params;
1686 	const struct rte_compressdev_capabilities *capab;
1688 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1689 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1691 	/* Check if driver supports any checksum */
1692 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1693 			(capab->comp_feature_flags &
1694 			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1695 			(capab->comp_feature_flags &
1696 			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
1699 	struct rte_comp_xform *compress_xform =
1700 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1701 	if (compress_xform == NULL) {
1702 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1707 	memcpy(compress_xform, ts_params->def_comp_xform,
1708 			sizeof(struct rte_comp_xform));
1710 	struct rte_comp_xform *decompress_xform =
1711 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1712 	if (decompress_xform == NULL) {
1713 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1714 		rte_free(compress_xform);
1719 	memcpy(decompress_xform, ts_params->def_decomp_xform,
1720 			sizeof(struct rte_comp_xform));
1722 	struct interim_data_params int_data = {
1731 	struct test_data_params test_data = {
1732 		RTE_COMP_OP_STATELESS,
1738 	/* Check if driver supports crc32 checksum and test */
1739 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1740 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1741 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1743 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1744 			/* Compress with compressdev, decompress with Zlib */
1745 			int_data.test_bufs = &compress_test_bufs[i];
1746 			int_data.buf_idx = &i;
1748 			/* Generate zlib checksum and test against selected
1749 			 * drivers decompression checksum
1751 			test_data.zlib_dir = ZLIB_COMPRESS;
1752 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1757 			/* Generate compression and decompression
1758 			 * checksum of selected driver
1760 			test_data.zlib_dir = ZLIB_NONE;
1761 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1768 	/* Check if driver supports adler32 checksum and test */
1769 	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1770 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1771 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1773 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1774 			int_data.test_bufs = &compress_test_bufs[i];
1775 			int_data.buf_idx = &i;
1777 			/* Generate zlib checksum and test against selected
1778 			 * drivers decompression checksum
1780 			test_data.zlib_dir = ZLIB_COMPRESS;
1781 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1785 			/* Generate compression and decompression
1786 			 * checksum of selected driver
1788 			test_data.zlib_dir = ZLIB_NONE;
1789 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1796 	/* Check if driver supports combined crc and adler checksum and test */
1797 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1798 		compress_xform->compress.chksum =
1799 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1800 		decompress_xform->decompress.chksum =
1801 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1803 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1804 			int_data.test_bufs = &compress_test_bufs[i];
1805 			int_data.buf_idx = &i;
1807 			/* Generate compression and decompression
1808 			 * checksum of selected driver
1810 			test_data.zlib_dir = ZLIB_NONE;
1811 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
	/* Common exit: release both locally allocated xforms. */
1821 	rte_free(compress_xform);
1822 	rte_free(decompress_xform);
/*
 * Negative test: feeds the PMD a destination buffer that is too small
 * (the out_of_space flag in test_data, set on a line not visible here)
 * and expects the driver to report the condition rather than corrupt
 * memory.  Errors in the log are expected, as announced up front.
 * Runs linear-buffer cases in both directions, plus SGL_BOTH variants
 * when RTE_COMP_FF_OOP_SGL_IN_SGL_OUT is supported.
 * NOTE(review): compress_xform is allocated but never visibly used
 * before the final rte_free -- the missing interior lines may use it;
 * verify against the full source.  This excerpt omits initializer
 * fields and failure returns.
 */
1827 test_compressdev_out_of_space_buffer(void)
1829 	struct comp_testsuite_params *ts_params = &testsuite_params;
1832 	const struct rte_compressdev_capabilities *capab;
1834 	RTE_LOG(INFO, USER1, "This is a negative test errors are expected\n");
1836 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1837 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1839 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1842 	struct rte_comp_xform *compress_xform =
1843 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1845 	if (compress_xform == NULL) {
1847 			"Compress xform could not be created\n");
1852 	struct interim_data_params int_data = {
1853 		&compress_test_bufs[0],
1856 		&ts_params->def_comp_xform,
1857 		&ts_params->def_decomp_xform,
1861 	struct test_data_params test_data = {
1862 		RTE_COMP_OP_STATELESS,
1867 	/* Compress with compressdev, decompress with Zlib */
1868 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1869 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1874 	/* Compress with Zlib, decompress with compressdev */
1875 	test_data.zlib_dir = ZLIB_COMPRESS;
1876 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
	/* Repeat the out-of-space scenario with chained mbufs if supported. */
1881 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1882 		/* Compress with compressdev, decompress with Zlib */
1883 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1884 		test_data.buff_type = SGL_BOTH;
1885 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1890 		/* Compress with Zlib, decompress with compressdev */
1891 		test_data.zlib_dir = ZLIB_COMPRESS;
1892 		test_data.buff_type = SGL_BOTH;
1893 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
	/* Common exit: release the locally allocated xform. */
1902 	rte_free(compress_xform);
/*
 * Test-suite registration table: suite-wide setup/teardown plus one
 * entry per test case.  All deflate cases share the generic per-test
 * setup/teardown pair; the invalid-configuration case runs without
 * fixtures.  (The closing brace of the initializer falls outside this
 * excerpt.)
 */
1907 static struct unit_test_suite compressdev_testsuite  = {
1908 	.suite_name = "compressdev unit test suite",
1909 	.setup = testsuite_setup,
1910 	.teardown = testsuite_teardown,
1911 	.unit_test_cases = {
1912 		TEST_CASE_ST(NULL, NULL,
1913 			test_compressdev_invalid_configuration),
1914 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1915 			test_compressdev_deflate_stateless_fixed),
1916 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1917 			test_compressdev_deflate_stateless_dynamic),
1918 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1919 			test_compressdev_deflate_stateless_multi_op),
1920 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1921 			test_compressdev_deflate_stateless_multi_level),
1922 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1923 			test_compressdev_deflate_stateless_multi_xform),
1924 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1925 			test_compressdev_deflate_stateless_sgl),
1926 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1927 			test_compressdev_deflate_stateless_checksum),
1928 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1929 			test_compressdev_out_of_space_buffer),
1930 		TEST_CASES_END() /**< NULL terminate unit test array */
/*
 * Entry point invoked by the DPDK test framework: runs the whole
 * compressdev suite and returns its aggregate result.  Registered
 * below under the "compressdev_autotest" command.  (The function's
 * return-type line and braces fall outside this excerpt.)
 */
1935 test_compressdev(void)
1937 	return unit_test_suite_runner(&compressdev_testsuite);
1940 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);