1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
17 #include "test_compressdev_test_buffer.h"
20 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
28 * 30% extra size for compressed data compared to original data,
29 * in case data size cannot be reduced and it is actually bigger
30 * due to the compress block headers
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
34 #define SMALL_SEG_SIZE 256
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
47 #define OUT_OF_SPACE_BUF 1
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
55 huffman_type_strings[] = {
56 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
57 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
58 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
69 LB_BOTH = 0, /* both input and output are linear*/
70 SGL_BOTH, /* both input and output are chained */
71 SGL_TO_LB, /* input buffer is chained */
72 LB_TO_SGL /* output buffer is chained */
79 struct comp_testsuite_params {
80 struct rte_mempool *large_mbuf_pool;
81 struct rte_mempool *small_mbuf_pool;
82 struct rte_mempool *big_mbuf_pool;
83 struct rte_mempool *op_pool;
84 struct rte_comp_xform *def_comp_xform;
85 struct rte_comp_xform *def_decomp_xform;
88 struct interim_data_params {
89 const char * const *test_bufs;
90 unsigned int num_bufs;
92 struct rte_comp_xform **compress_xforms;
93 struct rte_comp_xform **decompress_xforms;
94 unsigned int num_xforms;
97 struct test_data_params {
98 enum rte_comp_op_type compress_state;
99 enum rte_comp_op_type decompress_state;
100 enum varied_buff buff_type;
101 enum zlib_direction zlib_dir;
102 unsigned int out_of_space;
103 unsigned int big_data;
104 /* stateful decompression specific parameters */
105 unsigned int decompress_output_block_size;
106 unsigned int decompress_steps_max;
/* Single global testsuite state (pools + default xforms); zero-initialized,
 * populated by testsuite_setup() and released by testsuite_teardown(). */
109 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Testsuite-wide cleanup: warn (without failing) about any mbufs/ops still
 * outstanding in the pools — that would indicate a leak in a test case —
 * then free all pools and the two default xforms.
 * NOTE(review): several original source lines are elided in this extract.
 */
112 testsuite_teardown(void)
114 struct comp_testsuite_params *ts_params = &testsuite_params;
/* Leak detection: rte_mempool_in_use_count() > 0 means a test forgot to free */
116 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
117 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
118 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
119 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
120 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
121 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
122 if (rte_mempool_in_use_count(ts_params->op_pool))
123 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() accept NULL, so partial setup is safe here */
125 rte_mempool_free(ts_params->large_mbuf_pool);
126 rte_mempool_free(ts_params->small_mbuf_pool);
127 rte_mempool_free(ts_params->big_mbuf_pool);
128 rte_mempool_free(ts_params->op_pool);
129 rte_free(ts_params->def_comp_xform);
130 rte_free(ts_params->def_decomp_xform);
/*
 * One-time testsuite setup: requires at least one compressdev, sizes the
 * large mbuf pool from the biggest test buffer (scaled by
 * COMPRESS_BUF_SIZE_RATIO since "compressed" output can exceed the input),
 * creates the small/big SGL pools and the op pool, and allocates + fills
 * the default compress/decompress DEFLATE xforms.
 * NOTE(review): error-path returns and some arguments are elided in this
 * extract; on failure the visible tail calls testsuite_teardown().
 */
134 testsuite_setup(void)
136 struct comp_testsuite_params *ts_params = &testsuite_params;
137 uint32_t max_buf_size = 0;
/* Nothing to test without a compress device */
140 if (rte_compressdev_count() == 0) {
141 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
145 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
146 rte_compressdev_name_get(0));
/* Find the largest test buffer (incl. NUL terminator) to size the pool */
148 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
149 max_buf_size = RTE_MAX(max_buf_size,
150 strlen(compress_test_bufs[i]) + 1);
153 * Buffers to be used in compression and decompression.
154 * Since decompressed data might be larger than
155 * compressed data (due to block header),
156 * buffers should be big enough for both cases.
158 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
159 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
162 max_buf_size + RTE_PKTMBUF_HEADROOM,
164 if (ts_params->large_mbuf_pool == NULL) {
165 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
169 /* Create mempool with smaller buffers for SGL testing */
170 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
171 NUM_LARGE_MBUFS * MAX_SEGS,
173 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
175 if (ts_params->small_mbuf_pool == NULL) {
176 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
180 /* Create mempool with big buffers for SGL testing */
181 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
184 MAX_MBUF_SEGMENT_SIZE,
186 if (ts_params->big_mbuf_pool == NULL) {
187 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* Op pool reserves room after each op for struct priv_op_data (orig index) */
191 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
192 0, sizeof(struct priv_op_data),
194 if (ts_params->op_pool == NULL) {
195 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
199 ts_params->def_comp_xform =
200 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
201 if (ts_params->def_comp_xform == NULL) {
203 "Default compress xform could not be created\n");
206 ts_params->def_decomp_xform =
207 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
208 if (ts_params->def_decomp_xform == NULL) {
210 "Default decompress xform could not be created\n");
214 /* Initializes default values for compress/decompress xforms */
215 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* Note the trailing comma operators below are original-source quirks */
216 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
217 ts_params->def_comp_xform->compress.deflate.huffman =
218 RTE_COMP_HUFFMAN_DEFAULT;
219 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
220 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
221 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
223 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
224 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
225 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
226 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path (elided label above): undo any partial allocation */
231 testsuite_teardown();
/*
 * Per-test setup: configure device 0 with one queue pair and
 * NUM_MAX_XFORMS private xforms, set up queue pair 0 with
 * NUM_MAX_INFLIGHT_OPS descriptors, then start the device.
 * NOTE(review): success/failure return statements are elided here.
 */
237 generic_ut_setup(void)
239 /* Configure compressdev (one device, one queue pair) */
240 struct rte_compressdev_config config = {
241 .socket_id = rte_socket_id(),
243 .max_nb_priv_xforms = NUM_MAX_XFORMS,
247 if (rte_compressdev_configure(0, &config) < 0) {
248 RTE_LOG(ERR, USER1, "Device configuration failed\n");
252 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
253 rte_socket_id()) < 0) {
254 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
258 if (rte_compressdev_start(0) < 0) {
259 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop device 0 and close it, logging (not failing)
 * if the close call reports an error. */
267 generic_ut_teardown(void)
269 rte_compressdev_stop(0);
270 if (rte_compressdev_close(0) < 0)
271 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: rte_compressdev_configure()/queue_pair_setup() must
 * reject (a) zero queue pairs, (b) more queue pairs than the device
 * reports as its maximum (only checked when a maximum is advertised),
 * and (c) a queue-pair setup before any queue pairs were configured.
 * TEST_ASSERT_FAIL expects the wrapped call to return non-zero/failure.
 */
275 test_compressdev_invalid_configuration(void)
277 struct rte_compressdev_config invalid_config;
278 struct rte_compressdev_config valid_config = {
279 .socket_id = rte_socket_id(),
281 .max_nb_priv_xforms = NUM_MAX_XFORMS,
284 struct rte_compressdev_info dev_info;
286 /* Invalid configuration with 0 queue pairs */
287 memcpy(&invalid_config, &valid_config,
288 sizeof(struct rte_compressdev_config));
289 invalid_config.nb_queue_pairs = 0;
291 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
292 "Device configuration was successful "
293 "with no queue pairs (invalid)\n");
296 * Invalid configuration with too many queue pairs
297 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit advertised" — skip this case */
299 rte_compressdev_info_get(0, &dev_info);
300 if (dev_info.max_nb_queue_pairs != 0) {
301 memcpy(&invalid_config, &valid_config,
302 sizeof(struct rte_compressdev_config));
303 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
305 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
306 "Device configuration was successful "
307 "with too many queue pairs (invalid)\n");
310 /* Invalid queue pair setup, with no number of queue pairs set */
311 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
312 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
313 "Queue pair setup was successful "
314 "with no queue pairs set (invalid)\n");
/*
 * Compare two buffers for exact equality: lengths must match and the
 * bytes must be identical (memcmp). Logs the reason on mismatch.
 * NOTE(review): the return statements are elided in this extract;
 * presumably failure/success codes — confirm against the full source.
 */
320 compare_buffers(const char *buffer1, uint32_t buffer1_len,
321 const char *buffer2, uint32_t buffer2_len)
323 if (buffer1_len != buffer2_len) {
324 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
328 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
329 RTE_LOG(ERR, USER1, "Buffers are different\n");
337 * Maps compressdev flush flags to their zlib equivalents
 * (RTE_COMP_FLUSH_* -> Z_* flush values; the returned zlib constants
 * are on lines elided from this extract).
340 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
343 case RTE_COMP_FLUSH_NONE:
345 case RTE_COMP_FLUSH_SYNC:
347 case RTE_COMP_FLUSH_FULL:
349 case RTE_COMP_FLUSH_FINAL:
352 * Default case: only the values above are valid inputs,
353 * so this should never happen.
/*
 * Reference compression path: compress op->m_src into op->m_dst with the
 * zlib library directly (no PMD), so PMD results can be validated against
 * a known-good implementation. Handles chained (SGL) mbufs by staging
 * through temporary linear buffers allocated with rte_malloc. On success
 * fills op->consumed/produced/status/output_chksum like a PMD would.
 * NOTE(review): many lines (braces, else arms, goto labels, returns) are
 * elided in this extract; comments below describe only the visible code.
 */
361 compress_zlib(struct rte_comp_op *op,
362 const struct rte_comp_xform *xform, int mem_level)
366 int strategy, window_bits, comp_level;
367 int ret = TEST_FAILED;
368 uint8_t *single_src_buf = NULL;
369 uint8_t *single_dst_buf = NULL;
371 /* initialize zlib stream */
372 stream.zalloc = Z_NULL;
373 stream.zfree = Z_NULL;
374 stream.opaque = Z_NULL;
/* Fixed Huffman maps to a non-default strategy (elided); else default */
376 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
379 strategy = Z_DEFAULT_STRATEGY;
382 * Window bits is the base two logarithm of the window size (in bytes).
383 * When doing raw DEFLATE, this number will be negative.
385 window_bits = -(xform->compress.window_size);
/* Checksummed modes switch zlib into zlib-wrapped (ADLER32) or
 * gzip-wrapped (CRC32, windowBits+16 encoded in the macro) output */
386 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
388 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
389 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
391 comp_level = xform->compress.level;
393 if (comp_level != RTE_COMP_LEVEL_NONE)
394 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
395 window_bits, mem_level, strategy);
/* LEVEL_NONE: stored (uncompressed) deflate blocks */
397 ret = deflateInit(&stream, Z_NO_COMPRESSION);
400 printf("Zlib deflate could not be initialized\n");
404 /* Assuming stateless operation */
/* Chained source: flatten into one linear buffer for zlib */
406 if (op->m_src->nb_segs > 1) {
407 single_src_buf = rte_malloc(NULL,
408 rte_pktmbuf_pkt_len(op->m_src), 0);
409 if (single_src_buf == NULL) {
410 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
414 if (rte_pktmbuf_read(op->m_src, op->src.offset,
415 rte_pktmbuf_pkt_len(op->m_src) -
417 single_src_buf) == NULL) {
419 "Buffer could not be read entirely\n");
423 stream.avail_in = op->src.length;
424 stream.next_in = single_src_buf;
/* Linear source: feed the mbuf data area to zlib directly */
427 stream.avail_in = op->src.length;
428 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: compress into a temporary linear buffer first */
432 if (op->m_dst->nb_segs > 1) {
434 single_dst_buf = rte_malloc(NULL,
435 rte_pktmbuf_pkt_len(op->m_dst), 0);
436 if (single_dst_buf == NULL) {
438 "Buffer could not be allocated\n");
442 stream.avail_out = op->m_dst->pkt_len;
443 stream.next_out = single_dst_buf;
445 } else {/* linear output */
446 stream.avail_out = op->m_dst->data_len;
447 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
451 /* Stateless operation, all buffer will be compressed in one go */
452 zlib_flush = map_zlib_flush_flag(op->flush_flag);
453 ret = deflate(&stream, zlib_flush);
455 if (stream.avail_in != 0) {
456 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* A single-shot deflate with final flush must end the stream */
460 if (ret != Z_STREAM_END)
463 /* Copy data to destination SGL */
464 if (op->m_dst->nb_segs > 1) {
465 uint32_t remaining_data = stream.total_out;
466 uint8_t *src_data = single_dst_buf;
467 struct rte_mbuf *dst_buf = op->m_dst;
/* Scatter the linear zlib output segment by segment */
469 while (remaining_data > 0) {
470 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
471 uint8_t *, op->dst.offset);
473 if (remaining_data < dst_buf->data_len) {
474 memcpy(dst_data, src_data, remaining_data);
477 memcpy(dst_data, src_data, dst_buf->data_len);
478 remaining_data -= dst_buf->data_len;
479 src_data += dst_buf->data_len;
480 dst_buf = dst_buf->next;
485 op->consumed = stream.total_in;
/* Strip the zlib/gzip wrapper so op->produced counts raw deflate bytes,
 * matching what a compressdev PMD reports */
486 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
487 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
488 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
489 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
491 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
492 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
493 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
494 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
497 op->produced = stream.total_out;
499 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* stream.adler holds adler32 or crc32 depending on the wrapper used */
500 op->output_chksum = stream.adler;
502 deflateReset(&stream);
/* Cleanup path: rte_free(NULL) is a no-op, safe for untaken branches */
507 rte_free(single_src_buf);
508 rte_free(single_dst_buf);
/*
 * Reference decompression path: inflate op->m_src into op->m_dst with the
 * zlib library directly (no PMD). Chained (SGL) source mbufs are first
 * flattened into a temporary linear buffer, and the output is staged in a
 * second temporary buffer before being scattered back to the SGL chain.
 * On success fills op->consumed/produced/status.
 * NOTE(review): braces, else arms, labels and returns are elided in this
 * extract; comments below describe only the visible code.
 */
514 decompress_zlib(struct rte_comp_op *op,
515 const struct rte_comp_xform *xform)
520 int ret = TEST_FAILED;
521 uint8_t *single_src_buf = NULL;
522 uint8_t *single_dst_buf = NULL;
524 /* initialize zlib stream */
525 stream.zalloc = Z_NULL;
526 stream.zfree = Z_NULL;
527 stream.opaque = Z_NULL;
530 * Window bits is the base two logarithm of the window size (in bytes).
531 * When doing raw DEFLATE, this number will be negative.
533 window_bits = -(xform->decompress.window_size);
534 ret = inflateInit2(&stream, window_bits);
/* Message kept verbatim from the original (says "deflate") */
537 printf("Zlib deflate could not be initialized\n");
541 /* Assuming stateless operation */
/* Chained source: flatten input and stage output in linear buffers */
543 if (op->m_src->nb_segs > 1) {
544 single_src_buf = rte_malloc(NULL,
545 rte_pktmbuf_pkt_len(op->m_src), 0);
546 if (single_src_buf == NULL) {
547 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
/* NOTE(review): output staging buffer is sized from the *destination*
 * chain's pkt_len — assumes m_dst was pre-sized large enough */
550 single_dst_buf = rte_malloc(NULL,
551 rte_pktmbuf_pkt_len(op->m_dst), 0);
552 if (single_dst_buf == NULL) {
553 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
556 if (rte_pktmbuf_read(op->m_src, 0,
557 rte_pktmbuf_pkt_len(op->m_src),
558 single_src_buf) == NULL) {
560 "Buffer could not be read entirely\n");
564 stream.avail_in = op->src.length;
565 stream.next_in = single_src_buf;
566 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
567 stream.next_out = single_dst_buf;
/* Linear source: point zlib straight at the mbuf data areas */
570 stream.avail_in = op->src.length;
571 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
572 stream.avail_out = op->m_dst->data_len;
573 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
576 /* Stateless operation, all buffer will be compressed in one go */
577 zlib_flush = map_zlib_flush_flag(op->flush_flag);
578 ret = inflate(&stream, zlib_flush);
580 if (stream.avail_in != 0) {
581 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* Single-shot inflate must consume the whole stream */
585 if (ret != Z_STREAM_END)
/* Scatter the staged linear output back over the destination chain
 * (condition keys off m_src segmentation, matching the staging above) */
588 if (op->m_src->nb_segs > 1) {
589 uint32_t remaining_data = stream.total_out;
590 uint8_t *src_data = single_dst_buf;
591 struct rte_mbuf *dst_buf = op->m_dst;
593 while (remaining_data > 0) {
594 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
597 if (remaining_data < dst_buf->data_len) {
598 memcpy(dst_data, src_data, remaining_data);
601 memcpy(dst_data, src_data, dst_buf->data_len);
602 remaining_data -= dst_buf->data_len;
603 src_data += dst_buf->data_len;
604 dst_buf = dst_buf->next;
/* Report results in compressdev terms */
609 op->consumed = stream.total_in;
610 op->produced = stream.total_out;
611 op->status = RTE_COMP_OP_STATUS_SUCCESS;
613 inflateReset(&stream);
/*
 * Build a scatter-gather (chained) mbuf of total_data_size bytes rooted at
 * head_buf: append up to seg_size bytes per segment, allocating extra
 * segments from small_mbuf_pool (or large_mbuf_pool for an oversized last
 * segment), and copy test_buf into the chain when it is non-NULL (NULL
 * means "allocate space only", used for destination buffers).
 * limit_segs_in_sgl caps the segment count (0 = no cap).
 * NOTE(review): closing braces, else keywords and return statements are
 * elided in this extract; comments below describe only the visible code.
 */
623 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
624 uint32_t total_data_size,
625 struct rte_mempool *small_mbuf_pool,
626 struct rte_mempool *large_mbuf_pool,
627 uint8_t limit_segs_in_sgl,
630 uint32_t remaining_data = total_data_size;
/* Segments needed at seg_size bytes each, rounding up */
631 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
632 struct rte_mempool *pool;
633 struct rte_mbuf *next_seg;
636 const char *data_ptr = test_buf;
/* Enforce the segment cap; -1 accounts for the head segment below */
640 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
641 num_remaining_segs = limit_segs_in_sgl - 1;
644 * Allocate data in the first segment (header) and
645 * copy data if test buffer is provided
647 if (remaining_data < seg_size)
648 data_size = remaining_data;
650 data_size = seg_size;
651 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
652 if (buf_ptr == NULL) {
654 "Not enough space in the 1st buffer\n");
658 if (data_ptr != NULL) {
659 /* Copy characters without NULL terminator */
660 strncpy(buf_ptr, data_ptr, data_size);
661 data_ptr += data_size;
663 remaining_data -= data_size;
664 num_remaining_segs--;
667 * Allocate the rest of the segments,
668 * copy the rest of the data and chain the segments.
670 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment takes whatever data remains; when the cap truncated the
 * chain it may exceed seg_size, so pull from the large pool then */
672 if (i == (num_remaining_segs - 1)) {
674 if (remaining_data > seg_size)
675 pool = large_mbuf_pool;
677 pool = small_mbuf_pool;
678 data_size = remaining_data;
680 data_size = seg_size;
681 pool = small_mbuf_pool;
684 next_seg = rte_pktmbuf_alloc(pool);
685 if (next_seg == NULL) {
687 "New segment could not be allocated "
688 "from the mempool\n");
691 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
692 if (buf_ptr == NULL) {
694 "Not enough space in the buffer\n");
/* Free the orphan segment before bailing out */
695 rte_pktmbuf_free(next_seg);
698 if (data_ptr != NULL) {
699 /* Copy characters without NULL terminator */
700 strncpy(buf_ptr, data_ptr, data_size);
701 data_ptr += data_size;
703 remaining_data -= data_size;
/* Link the new segment onto the head chain */
705 ret = rte_pktmbuf_chain(head_buf, next_seg);
707 rte_pktmbuf_free(next_seg);
709 "Segment could not chained\n");
718 * Compresses and decompresses buffer with compressdev API and Zlib API
721 test_deflate_comp_decomp(const struct interim_data_params *int_data,
722 const struct test_data_params *test_data)
724 struct comp_testsuite_params *ts_params = &testsuite_params;
725 const char * const *test_bufs = int_data->test_bufs;
726 unsigned int num_bufs = int_data->num_bufs;
727 uint16_t *buf_idx = int_data->buf_idx;
728 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
729 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
730 unsigned int num_xforms = int_data->num_xforms;
731 enum rte_comp_op_type compress_state = test_data->compress_state;
732 enum rte_comp_op_type decompress_state = test_data->decompress_state;
733 unsigned int buff_type = test_data->buff_type;
734 unsigned int out_of_space = test_data->out_of_space;
735 unsigned int big_data = test_data->big_data;
736 enum zlib_direction zlib_dir = test_data->zlib_dir;
737 int ret_status = TEST_FAILED;
739 struct rte_mbuf *uncomp_bufs[num_bufs];
740 struct rte_mbuf *comp_bufs[num_bufs];
741 struct rte_comp_op *ops[num_bufs];
742 struct rte_comp_op *ops_processed[num_bufs];
743 void *priv_xforms[num_bufs];
744 uint16_t num_enqd, num_deqd, num_total_deqd;
745 uint16_t num_priv_xforms = 0;
746 unsigned int deqd_retries = 0;
747 struct priv_op_data *priv_data;
750 struct rte_mempool *buf_pool;
752 /* Compressing with CompressDev */
753 unsigned int oos_zlib_decompress =
754 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
755 /* Decompressing with CompressDev */
756 unsigned int oos_zlib_compress =
757 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
758 const struct rte_compressdev_capabilities *capa =
759 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
760 char *contig_buf = NULL;
761 uint64_t compress_checksum[num_bufs];
763 char *all_decomp_data = NULL;
764 unsigned int decomp_produced_data_size = 0;
765 unsigned int step = 0;
767 TEST_ASSERT(decompress_state == RTE_COMP_OP_STATELESS || num_bufs == 1,
768 "Number of stateful operations in a step should be 1");
772 "Compress device does not support DEFLATE\n");
776 /* Initialize all arrays to NULL */
777 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
778 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
779 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
780 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
781 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
783 if (decompress_state == RTE_COMP_OP_STATEFUL) {
784 data_size = strlen(test_bufs[0]) + 1;
785 all_decomp_data = rte_malloc(NULL, data_size,
786 RTE_CACHE_LINE_SIZE);
790 buf_pool = ts_params->big_mbuf_pool;
791 else if (buff_type == SGL_BOTH)
792 buf_pool = ts_params->small_mbuf_pool;
794 buf_pool = ts_params->large_mbuf_pool;
796 /* Prepare the source mbufs with the data */
797 ret = rte_pktmbuf_alloc_bulk(buf_pool,
798 uncomp_bufs, num_bufs);
801 "Source mbufs could not be allocated "
802 "from the mempool\n");
806 if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
807 for (i = 0; i < num_bufs; i++) {
808 data_size = strlen(test_bufs[i]) + 1;
809 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
811 big_data ? buf_pool : ts_params->small_mbuf_pool,
812 big_data ? buf_pool : ts_params->large_mbuf_pool,
813 big_data ? 0 : MAX_SEGS,
814 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
818 for (i = 0; i < num_bufs; i++) {
819 data_size = strlen(test_bufs[i]) + 1;
820 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
821 strlcpy(buf_ptr, test_bufs[i], data_size);
825 /* Prepare the destination mbufs */
826 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
829 "Destination mbufs could not be allocated "
830 "from the mempool\n");
834 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
835 for (i = 0; i < num_bufs; i++) {
836 if (out_of_space == 1 && oos_zlib_decompress)
837 data_size = OUT_OF_SPACE_BUF;
839 (data_size = strlen(test_bufs[i]) *
840 COMPRESS_BUF_SIZE_RATIO);
842 if (prepare_sgl_bufs(NULL, comp_bufs[i],
844 big_data ? buf_pool : ts_params->small_mbuf_pool,
845 big_data ? buf_pool : ts_params->large_mbuf_pool,
846 big_data ? 0 : MAX_SEGS,
847 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
853 for (i = 0; i < num_bufs; i++) {
854 if (out_of_space == 1 && oos_zlib_decompress)
855 data_size = OUT_OF_SPACE_BUF;
857 (data_size = strlen(test_bufs[i]) *
858 COMPRESS_BUF_SIZE_RATIO);
860 rte_pktmbuf_append(comp_bufs[i], data_size);
864 /* Build the compression operations */
865 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
868 "Compress operations could not be allocated "
869 "from the mempool\n");
874 for (i = 0; i < num_bufs; i++) {
875 ops[i]->m_src = uncomp_bufs[i];
876 ops[i]->m_dst = comp_bufs[i];
877 ops[i]->src.offset = 0;
878 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
879 ops[i]->dst.offset = 0;
880 if (compress_state == RTE_COMP_OP_STATELESS)
881 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
884 "Stateful operations are not supported "
885 "in these tests yet\n");
888 ops[i]->input_chksum = 0;
890 * Store original operation index in private data,
891 * since ordering does not have to be maintained,
892 * when dequeueing from compressdev, so a comparison
893 * at the end of the test can be done.
895 priv_data = (struct priv_op_data *) (ops[i] + 1);
896 priv_data->orig_idx = i;
899 /* Compress data (either with Zlib API or compressdev API */
900 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
901 for (i = 0; i < num_bufs; i++) {
902 const struct rte_comp_xform *compress_xform =
903 compress_xforms[i % num_xforms];
904 ret = compress_zlib(ops[i], compress_xform,
909 ops_processed[i] = ops[i];
912 /* Create compress private xform data */
913 for (i = 0; i < num_xforms; i++) {
914 ret = rte_compressdev_private_xform_create(0,
915 (const struct rte_comp_xform *)compress_xforms[i],
919 "Compression private xform "
920 "could not be created\n");
926 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
927 /* Attach shareable private xform data to ops */
928 for (i = 0; i < num_bufs; i++)
929 ops[i]->private_xform = priv_xforms[i % num_xforms];
931 /* Create rest of the private xforms for the other ops */
932 for (i = num_xforms; i < num_bufs; i++) {
933 ret = rte_compressdev_private_xform_create(0,
934 compress_xforms[i % num_xforms],
938 "Compression private xform "
939 "could not be created\n");
945 /* Attach non shareable private xform data to ops */
946 for (i = 0; i < num_bufs; i++)
947 ops[i]->private_xform = priv_xforms[i];
950 /* Enqueue and dequeue all operations */
951 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
952 if (num_enqd < num_bufs) {
954 "The operations could not be enqueued\n");
961 * If retrying a dequeue call, wait for 10 ms to allow
962 * enough time to the driver to process the operations
964 if (deqd_retries != 0) {
966 * Avoid infinite loop if not all the
967 * operations get out of the device
969 if (deqd_retries == MAX_DEQD_RETRIES) {
971 "Not all operations could be "
975 usleep(DEQUEUE_WAIT_TIME);
977 num_deqd = rte_compressdev_dequeue_burst(0, 0,
978 &ops_processed[num_total_deqd], num_bufs);
979 num_total_deqd += num_deqd;
982 } while (num_total_deqd < num_enqd);
986 /* Free compress private xforms */
987 for (i = 0; i < num_priv_xforms; i++) {
988 rte_compressdev_private_xform_free(0, priv_xforms[i]);
989 priv_xforms[i] = NULL;
994 for (i = 0; i < num_bufs; i++) {
995 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
996 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
997 const struct rte_comp_compress_xform *compress_xform =
998 &compress_xforms[xform_idx]->compress;
999 enum rte_comp_huffman huffman_type =
1000 compress_xform->deflate.huffman;
1001 char engine[] = "zlib (directly, not PMD)";
1002 if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
1003 strlcpy(engine, "PMD", sizeof(engine));
1005 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
1006 " %u bytes (level = %d, huffman = %s)\n",
1007 buf_idx[priv_data->orig_idx], engine,
1008 ops_processed[i]->consumed, ops_processed[i]->produced,
1009 compress_xform->level,
1010 huffman_type_strings[huffman_type]);
1011 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
1012 ops_processed[i]->consumed == 0 ? 0 :
1013 (float)ops_processed[i]->produced /
1014 ops_processed[i]->consumed * 100);
1015 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
1016 compress_checksum[i] = ops_processed[i]->output_chksum;
1021 * Check operation status and free source mbufs (destination mbuf and
1022 * compress operation information is needed for the decompression stage)
1024 for (i = 0; i < num_bufs; i++) {
1025 if (out_of_space && oos_zlib_decompress) {
1026 if (ops_processed[i]->status !=
1027 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1028 ret_status = TEST_FAILED;
1030 "Operation without expected out of "
1031 "space status error\n");
1037 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1039 "Some operations were not successful\n");
1042 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1043 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1044 uncomp_bufs[priv_data->orig_idx] = NULL;
1047 if (out_of_space && oos_zlib_decompress) {
1048 ret_status = TEST_SUCCESS;
1052 /* Allocate buffers for decompressed data */
1053 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1056 "Destination mbufs could not be allocated "
1057 "from the mempool\n");
1061 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1062 for (i = 0; i < num_bufs; i++) {
1063 priv_data = (struct priv_op_data *)
1064 (ops_processed[i] + 1);
1065 if (out_of_space == 1 && oos_zlib_compress)
1066 data_size = OUT_OF_SPACE_BUF;
1067 else if (test_data->decompress_output_block_size != 0)
1069 test_data->decompress_output_block_size;
1072 strlen(test_bufs[priv_data->orig_idx]) + 1;
1074 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1076 big_data ? buf_pool : ts_params->small_mbuf_pool,
1077 big_data ? buf_pool : ts_params->large_mbuf_pool,
1078 big_data ? 0 : MAX_SEGS,
1079 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1085 for (i = 0; i < num_bufs; i++) {
1086 priv_data = (struct priv_op_data *)
1087 (ops_processed[i] + 1);
1088 if (out_of_space == 1 && oos_zlib_compress)
1089 data_size = OUT_OF_SPACE_BUF;
1090 else if (test_data->decompress_output_block_size != 0)
1092 test_data->decompress_output_block_size;
1095 strlen(test_bufs[priv_data->orig_idx]) + 1;
1097 rte_pktmbuf_append(uncomp_bufs[i], data_size);
1101 /* Build the decompression operations */
1102 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1105 "Decompress operations could not be allocated "
1106 "from the mempool\n");
1110 /* Source buffer is the compressed data from the previous operations */
1111 for (i = 0; i < num_bufs; i++) {
1112 ops[i]->m_src = ops_processed[i]->m_dst;
1113 ops[i]->m_dst = uncomp_bufs[i];
1114 ops[i]->src.offset = 0;
1116 * Set the length of the compressed data to the
1117 * number of bytes that were produced in the previous stage
1119 ops[i]->src.length = ops_processed[i]->produced;
1121 ops[i]->dst.offset = 0;
1122 if (decompress_state == RTE_COMP_OP_STATELESS) {
1123 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1124 ops[i]->op_type = RTE_COMP_OP_STATELESS;
1125 } else if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_NONE) {
1126 ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
1127 ops[i]->op_type = RTE_COMP_OP_STATEFUL;
1130 "Stateful operations are not supported "
1131 "in these tests yet\n");
1134 ops[i]->input_chksum = 0;
1136 * Copy private data from previous operations,
1137 * to keep the pointer to the original buffer
1139 memcpy(ops[i] + 1, ops_processed[i] + 1,
1140 sizeof(struct priv_op_data));
1144 * Free the previous compress operations,
1145 * as they are not needed anymore
1147 rte_comp_op_bulk_free(ops_processed, num_bufs);
1149 /* Decompress data (either with Zlib API or compressdev API */
1150 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1151 for (i = 0; i < num_bufs; i++) {
1152 priv_data = (struct priv_op_data *)(ops[i] + 1);
1153 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1154 const struct rte_comp_xform *decompress_xform =
1155 decompress_xforms[xform_idx];
1157 ret = decompress_zlib(ops[i], decompress_xform);
1161 ops_processed[i] = ops[i];
1164 if (decompress_state == RTE_COMP_OP_STATELESS) {
1165 /* Create decompress private xform data */
1166 for (i = 0; i < num_xforms; i++) {
1167 ret = rte_compressdev_private_xform_create(0,
1168 (const struct rte_comp_xform *)
1169 decompress_xforms[i],
1173 "Decompression private xform "
1174 "could not be created\n");
1180 if (capa->comp_feature_flags &
1181 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1182 /* Attach shareable private xform data to ops */
1183 for (i = 0; i < num_bufs; i++) {
1184 priv_data = (struct priv_op_data *)
1186 uint16_t xform_idx =
1187 priv_data->orig_idx % num_xforms;
1188 ops[i]->private_xform =
1189 priv_xforms[xform_idx];
1192 /* Create rest of the private xforms */
1193 /* for the other ops */
1194 for (i = num_xforms; i < num_bufs; i++) {
1196 rte_compressdev_private_xform_create(0,
1197 decompress_xforms[i % num_xforms],
1201 "Decompression private xform could not be created\n");
1207 /* Attach non shareable private xform data */
1209 for (i = 0; i < num_bufs; i++) {
1210 priv_data = (struct priv_op_data *)
1212 uint16_t xform_idx =
1213 priv_data->orig_idx;
1214 ops[i]->private_xform =
1215 priv_xforms[xform_idx];
1219 /* Create a stream object for stateful decompression */
1220 ret = rte_compressdev_stream_create(0,
1221 decompress_xforms[0], &stream);
1224 "Decompression stream could not be created, error %d\n",
1228 /* Attach stream to ops */
1229 for (i = 0; i < num_bufs; i++)
1230 ops[i]->stream = stream;
1234 /* Enqueue and dequeue all operations */
1235 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1236 if (num_enqd < num_bufs) {
1238 "The operations could not be enqueued\n");
1245 * If retrying a dequeue call, wait for 10 ms to allow
1246 * enough time to the driver to process the operations
1248 if (deqd_retries != 0) {
1250 * Avoid infinite loop if not all the
1251 * operations get out of the device
1253 if (deqd_retries == MAX_DEQD_RETRIES) {
1255 "Not all operations could be "
1259 usleep(DEQUEUE_WAIT_TIME);
1261 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1262 &ops_processed[num_total_deqd], num_bufs);
1263 num_total_deqd += num_deqd;
1265 } while (num_total_deqd < num_enqd);
1270 for (i = 0; i < num_bufs; i++) {
1271 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1272 char engine[] = "zlib, (directly, no PMD)";
1273 if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1274 strlcpy(engine, "pmd", sizeof(engine));
1275 RTE_LOG(DEBUG, USER1,
1276 "Buffer %u decompressed by %s from %u to %u bytes\n",
1277 buf_idx[priv_data->orig_idx], engine,
1278 ops_processed[i]->consumed, ops_processed[i]->produced);
1283 * Check operation status and free source mbuf (destination mbuf and
1284 * compress operation information is still needed)
1286 for (i = 0; i < num_bufs; i++) {
1287 if (out_of_space && oos_zlib_compress) {
1288 if (ops_processed[i]->status !=
1289 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1290 ret_status = TEST_FAILED;
1292 "Operation without expected out of "
1293 "space status error\n");
1299 if (decompress_state == RTE_COMP_OP_STATEFUL
1300 && (ops_processed[i]->status ==
1301 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
1302 || ops_processed[i]->status ==
1303 RTE_COMP_OP_STATUS_SUCCESS)) {
1304 /* collect the output into all_decomp_data */
1305 const void *ptr = rte_pktmbuf_read(
1306 ops_processed[i]->m_dst,
1307 ops_processed[i]->dst.offset,
1308 ops_processed[i]->produced,
1310 decomp_produced_data_size);
1311 if (ptr != all_decomp_data + decomp_produced_data_size)
1312 rte_memcpy(all_decomp_data +
1313 decomp_produced_data_size,
1314 ptr, ops_processed[i]->produced);
1315 decomp_produced_data_size += ops_processed[i]->produced;
1316 if (ops_processed[i]->src.length >
1317 ops_processed[i]->consumed) {
1318 if (ops_processed[i]->status ==
1319 RTE_COMP_OP_STATUS_SUCCESS) {
1322 "Operation finished too early\n");
1326 if (step >= test_data->decompress_steps_max) {
1329 "Operation exceeded maximum steps\n");
1332 ops[i] = ops_processed[i];
1334 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1335 ops[i]->src.offset +=
1336 ops_processed[i]->consumed;
1337 ops[i]->src.length -=
1338 ops_processed[i]->consumed;
1341 /* Compare the original stream with the */
1342 /* decompressed stream (in size and the data) */
1343 priv_data = (struct priv_op_data *)
1344 (ops_processed[i] + 1);
1346 test_bufs[priv_data->orig_idx];
1347 const char *buf2 = all_decomp_data;
1349 if (compare_buffers(buf1, strlen(buf1) + 1,
1350 buf2, decomp_produced_data_size) < 0)
1352 /* Test checksums */
1353 if (compress_xforms[0]->compress.chksum
1354 != RTE_COMP_CHECKSUM_NONE) {
1355 if (ops_processed[i]->output_chksum
1356 != compress_checksum[i]) {
1358 "The checksums differ\n"
1359 "Compression Checksum: %" PRIu64 "\tDecompression "
1360 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1361 ops_processed[i]->output_chksum);
1366 } else if (ops_processed[i]->status !=
1367 RTE_COMP_OP_STATUS_SUCCESS) {
1369 "Some operations were not successful\n");
1372 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1373 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1374 comp_bufs[priv_data->orig_idx] = NULL;
1377 if ((out_of_space && oos_zlib_compress)
1378 || (decompress_state == RTE_COMP_OP_STATEFUL)) {
1379 ret_status = TEST_SUCCESS;
1384 * Compare the original stream with the decompressed stream
1385 * (in size and the data)
1387 for (i = 0; i < num_bufs; i++) {
1388 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1389 const char *buf1 = test_bufs[priv_data->orig_idx];
1391 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1392 if (contig_buf == NULL) {
1393 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1398 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1399 ops_processed[i]->produced, contig_buf);
1400 if (compare_buffers(buf1, strlen(buf1) + 1,
1401 buf2, ops_processed[i]->produced) < 0)
1404 /* Test checksums */
1405 if (compress_xforms[0]->compress.chksum !=
1406 RTE_COMP_CHECKSUM_NONE) {
1407 if (ops_processed[i]->output_chksum !=
1408 compress_checksum[i]) {
1409 RTE_LOG(ERR, USER1, "The checksums differ\n"
1410 "Compression Checksum: %" PRIu64 "\tDecompression "
1411 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1412 ops_processed[i]->output_chksum);
1417 rte_free(contig_buf);
1421 ret_status = TEST_SUCCESS;
1424 /* Free resources */
1425 for (i = 0; i < num_bufs; i++) {
1426 rte_pktmbuf_free(uncomp_bufs[i]);
1427 rte_pktmbuf_free(comp_bufs[i]);
1428 rte_comp_op_free(ops[i]);
1429 rte_comp_op_free(ops_processed[i]);
1431 for (i = 0; i < num_priv_xforms; i++)
1432 if (priv_xforms[i] != NULL)
1433 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1435 rte_compressdev_stream_free(0, stream);
1436 if (all_decomp_data != NULL)
1437 rte_free(all_decomp_data);
1438 rte_free(contig_buf);
/*
 * Stateless DEFLATE round-trip with fixed Huffman coding.
 * For each buffer in compress_test_bufs, runs both directions:
 * PMD compress / Zlib decompress, then Zlib compress / PMD decompress.
 * Skipped when the device does not advertise RTE_COMP_FF_HUFFMAN_FIXED
 * (early return lies in a gap of this view).
 */
1444 test_compressdev_deflate_stateless_fixed(void)
1446 struct comp_testsuite_params *ts_params = &testsuite_params;
1449 const struct rte_compressdev_capabilities *capab;
1451 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1452 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1454 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
/* Private copy of the default compress xform, switched to fixed Huffman */
1457 struct rte_comp_xform *compress_xform =
1458 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1460 if (compress_xform == NULL) {
1462 "Compress xform could not be created\n");
1467 memcpy(compress_xform, ts_params->def_comp_xform,
1468 sizeof(struct rte_comp_xform));
1469 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1471 struct interim_data_params int_data = {
1476 &ts_params->def_decomp_xform,
1480 struct test_data_params test_data = {
1481 RTE_COMP_OP_STATELESS,
1482 RTE_COMP_OP_STATELESS,
/* Exercise every test buffer individually, both directions per buffer */
1491 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1492 int_data.test_bufs = &compress_test_bufs[i];
1493 int_data.buf_idx = &i;
1495 /* Compress with compressdev, decompress with Zlib */
1496 test_data.zlib_dir = ZLIB_DECOMPRESS;
1497 ret = test_deflate_comp_decomp(&int_data, &test_data);
1501 /* Compress with Zlib, decompress with compressdev */
1502 test_data.zlib_dir = ZLIB_COMPRESS;
1503 ret = test_deflate_comp_decomp(&int_data, &test_data);
1511 rte_free(compress_xform);
/*
 * Stateless DEFLATE round-trip with dynamic Huffman coding; same
 * per-buffer, both-direction structure as the fixed-Huffman test.
 * Skipped when the device lacks RTE_COMP_FF_HUFFMAN_DYNAMIC.
 */
1516 test_compressdev_deflate_stateless_dynamic(void)
1518 struct comp_testsuite_params *ts_params = &testsuite_params;
/*
 * NOTE(review): compress_xform is allocated before the capability check
 * below; if the (not visible here) early return on a missing
 * RTE_COMP_FF_HUFFMAN_DYNAMIC flag does not free it, it leaks — verify.
 */
1521 struct rte_comp_xform *compress_xform =
1522 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1524 const struct rte_compressdev_capabilities *capab;
1526 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1527 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1529 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1532 if (compress_xform == NULL) {
1534 "Compress xform could not be created\n");
1539 memcpy(compress_xform, ts_params->def_comp_xform,
1540 sizeof(struct rte_comp_xform));
1541 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1543 struct interim_data_params int_data = {
1548 &ts_params->def_decomp_xform,
1552 struct test_data_params test_data = {
1553 RTE_COMP_OP_STATELESS,
1554 RTE_COMP_OP_STATELESS,
1563 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1564 int_data.test_bufs = &compress_test_bufs[i];
1565 int_data.buf_idx = &i;
1567 /* Compress with compressdev, decompress with Zlib */
1568 test_data.zlib_dir = ZLIB_DECOMPRESS;
1569 ret = test_deflate_comp_decomp(&int_data, &test_data);
1573 /* Compress with Zlib, decompress with compressdev */
1574 test_data.zlib_dir = ZLIB_COMPRESS;
1575 ret = test_deflate_comp_decomp(&int_data, &test_data);
1583 rte_free(compress_xform);
/*
 * Stateless DEFLATE with multiple operations in flight: all test buffers
 * are handed to test_deflate_comp_decomp() at once (num_bufs ops in one
 * burst) using the default compress/decompress xforms, both directions.
 */
1588 test_compressdev_deflate_stateless_multi_op(void)
1590 struct comp_testsuite_params *ts_params = &testsuite_params;
1591 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1592 uint16_t buf_idx[num_bufs];
/* buf_idx presumably filled with 0..num_bufs-1 here (body line not visible) */
1596 for (i = 0; i < num_bufs; i++)
1599 struct interim_data_params int_data = {
1603 &ts_params->def_comp_xform,
1604 &ts_params->def_decomp_xform,
1608 struct test_data_params test_data = {
1609 RTE_COMP_OP_STATELESS,
1610 RTE_COMP_OP_STATELESS,
1619 /* Compress with compressdev, decompress with Zlib */
1620 test_data.zlib_dir = ZLIB_DECOMPRESS;
1621 ret = test_deflate_comp_decomp(&int_data, &test_data);
1625 /* Compress with Zlib, decompress with compressdev */
1626 test_data.zlib_dir = ZLIB_COMPRESS;
1627 ret = test_deflate_comp_decomp(&int_data, &test_data);
1631 return TEST_SUCCESS;
/*
 * Stateless DEFLATE across all compression levels: for every test buffer,
 * sweeps compress.level from RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX and
 * verifies PMD compress / Zlib decompress at each level.
 */
1635 test_compressdev_deflate_stateless_multi_level(void)
1637 struct comp_testsuite_params *ts_params = &testsuite_params;
1641 struct rte_comp_xform *compress_xform =
1642 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1644 if (compress_xform == NULL) {
1646 "Compress xform could not be created\n");
1651 memcpy(compress_xform, ts_params->def_comp_xform,
1652 sizeof(struct rte_comp_xform));
1654 struct interim_data_params int_data = {
1659 &ts_params->def_decomp_xform,
1663 struct test_data_params test_data = {
1664 RTE_COMP_OP_STATELESS,
1665 RTE_COMP_OP_STATELESS,
1674 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1675 int_data.test_bufs = &compress_test_bufs[i];
1676 int_data.buf_idx = &i;
/* Inner sweep over every supported compression level */
1678 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1680 compress_xform->compress.level = level;
1681 /* Compress with compressdev, decompress with Zlib */
1682 test_data.zlib_dir = ZLIB_DECOMPRESS;
1683 ret = test_deflate_comp_decomp(&int_data, &test_data);
1692 rte_free(compress_xform);
/* Number of distinct xform pairs (and ops) used by the multi-xform test */
1696 #define NUM_XFORMS 3
/*
 * Stateless DEFLATE with several xforms in parallel: builds NUM_XFORMS
 * compress/decompress xform pairs (starting at RTE_COMP_LEVEL_MIN; the
 * level increment per iteration is in a gap of this view — presumably
 * each xform gets a different level), compresses the same input buffer
 * through each, and frees all xforms on exit.
 */
1698 test_compressdev_deflate_stateless_multi_xform(void)
1700 struct comp_testsuite_params *ts_params = &testsuite_params;
1701 uint16_t num_bufs = NUM_XFORMS;
1702 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1703 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1704 const char *test_buffers[NUM_XFORMS];
1706 unsigned int level = RTE_COMP_LEVEL_MIN;
1707 uint16_t buf_idx[num_bufs];
1710 /* Create multiple xforms with various levels */
1711 for (i = 0; i < NUM_XFORMS; i++) {
1712 compress_xforms[i] = rte_malloc(NULL,
1713 sizeof(struct rte_comp_xform), 0);
1714 if (compress_xforms[i] == NULL) {
1716 "Compress xform could not be created\n");
1721 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1722 sizeof(struct rte_comp_xform));
1723 compress_xforms[i]->compress.level = level;
1726 decompress_xforms[i] = rte_malloc(NULL,
1727 sizeof(struct rte_comp_xform), 0);
1728 if (decompress_xforms[i] == NULL) {
1730 "Decompress xform could not be created\n");
1735 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1736 sizeof(struct rte_comp_xform));
1739 for (i = 0; i < NUM_XFORMS; i++) {
1741 /* Use the same buffer in all sessions */
1742 test_buffers[i] = compress_test_bufs[0];
1745 struct interim_data_params int_data = {
1754 struct test_data_params test_data = {
1755 RTE_COMP_OP_STATELESS,
1756 RTE_COMP_OP_STATELESS,
1765 /* Compress with compressdev, decompress with Zlib */
1766 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Free every xform pair regardless of test outcome */
1773 for (i = 0; i < NUM_XFORMS; i++) {
1774 rte_free(compress_xforms[i]);
1775 rte_free(decompress_xforms[i]);
/*
 * Stateless DEFLATE with scatter-gather (chained-mbuf) buffers.
 * Requires RTE_COMP_FF_OOP_SGL_IN_SGL_OUT; additionally exercises
 * SGL-in/linear-out and linear-in/SGL-out when the corresponding
 * feature flags are advertised.  Each combination runs both
 * directions (PMD vs Zlib) per test buffer.
 */
1782 test_compressdev_deflate_stateless_sgl(void)
1784 struct comp_testsuite_params *ts_params = &testsuite_params;
1787 const struct rte_compressdev_capabilities *capab;
1789 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1790 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1792 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1795 struct interim_data_params int_data = {
1799 &ts_params->def_comp_xform,
1800 &ts_params->def_decomp_xform,
1804 struct test_data_params test_data = {
1805 RTE_COMP_OP_STATELESS,
1806 RTE_COMP_OP_STATELESS,
/* Baseline: chained input and chained output for every buffer */
1815 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1816 int_data.test_bufs = &compress_test_bufs[i];
1817 int_data.buf_idx = &i;
1819 /* Compress with compressdev, decompress with Zlib */
1820 test_data.zlib_dir = ZLIB_DECOMPRESS;
1821 ret = test_deflate_comp_decomp(&int_data, &test_data);
1825 /* Compress with Zlib, decompress with compressdev */
1826 test_data.zlib_dir = ZLIB_COMPRESS;
1827 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Optional: chained input, linear output */
1831 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1832 /* Compress with compressdev, decompress with Zlib */
1833 test_data.zlib_dir = ZLIB_DECOMPRESS;
1834 test_data.buff_type = SGL_TO_LB;
1835 ret = test_deflate_comp_decomp(&int_data, &test_data);
1839 /* Compress with Zlib, decompress with compressdev */
1840 test_data.zlib_dir = ZLIB_COMPRESS;
1841 test_data.buff_type = SGL_TO_LB;
1842 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Optional: linear input, chained output */
1847 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1848 /* Compress with compressdev, decompress with Zlib */
1849 test_data.zlib_dir = ZLIB_DECOMPRESS;
1850 test_data.buff_type = LB_TO_SGL;
1851 ret = test_deflate_comp_decomp(&int_data, &test_data);
1855 /* Compress with Zlib, decompress with compressdev */
1856 test_data.zlib_dir = ZLIB_COMPRESS;
1857 test_data.buff_type = LB_TO_SGL;
1858 ret = test_deflate_comp_decomp(&int_data, &test_data);
1864 return TEST_SUCCESS;
/*
 * Stateless DEFLATE checksum generation test.  Skipped unless the device
 * supports at least one of CRC32, Adler32, or combined CRC32+Adler32
 * checksums.  For each supported checksum type, every test buffer is run:
 *  - ZLIB_COMPRESS: Zlib-generated checksum compared against the PMD's
 *    decompression checksum (not possible for the combined type, which
 *    Zlib cannot produce — that type runs ZLIB_NONE only);
 *  - ZLIB_NONE: PMD compression checksum compared against PMD
 *    decompression checksum.
 */
1868 test_compressdev_deflate_stateless_checksum(void)
1870 struct comp_testsuite_params *ts_params = &testsuite_params;
1873 const struct rte_compressdev_capabilities *capab;
1875 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1876 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1878 /* Check if driver supports any checksum */
1879 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1880 (capab->comp_feature_flags &
1881 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1882 (capab->comp_feature_flags &
1883 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
/* Private xform copies so the checksum field can be varied per sub-test */
1886 struct rte_comp_xform *compress_xform =
1887 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1888 if (compress_xform == NULL) {
1889 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1893 memcpy(compress_xform, ts_params->def_comp_xform,
1894 sizeof(struct rte_comp_xform));
1896 struct rte_comp_xform *decompress_xform =
1897 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1898 if (decompress_xform == NULL) {
1899 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1900 rte_free(compress_xform);
1904 memcpy(decompress_xform, ts_params->def_decomp_xform,
1905 sizeof(struct rte_comp_xform));
1907 struct interim_data_params int_data = {
1916 struct test_data_params test_data = {
1917 RTE_COMP_OP_STATELESS,
1918 RTE_COMP_OP_STATELESS,
1927 /* Check if driver supports crc32 checksum and test */
1928 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1929 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1930 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1932 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1933 /* Compress with compressdev, decompress with Zlib */
1934 int_data.test_bufs = &compress_test_bufs[i];
1935 int_data.buf_idx = &i;
1937 /* Generate zlib checksum and test against selected
1938 * drivers decompression checksum
1940 test_data.zlib_dir = ZLIB_COMPRESS;
1941 ret = test_deflate_comp_decomp(&int_data, &test_data);
1945 /* Generate compression and decompression
1946 * checksum of selected driver
1948 test_data.zlib_dir = ZLIB_NONE;
1949 ret = test_deflate_comp_decomp(&int_data, &test_data);
1955 /* Check if driver supports adler32 checksum and test */
1956 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1957 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1958 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1960 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1961 int_data.test_bufs = &compress_test_bufs[i];
1962 int_data.buf_idx = &i;
1964 /* Generate zlib checksum and test against selected
1965 * drivers decompression checksum
1967 test_data.zlib_dir = ZLIB_COMPRESS;
1968 ret = test_deflate_comp_decomp(&int_data, &test_data);
1971 /* Generate compression and decompression
1972 * checksum of selected driver
1974 test_data.zlib_dir = ZLIB_NONE;
1975 ret = test_deflate_comp_decomp(&int_data, &test_data);
1981 /* Check if driver supports combined crc and adler checksum and test */
1982 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1983 compress_xform->compress.chksum =
1984 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1985 decompress_xform->decompress.chksum =
1986 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1988 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1989 int_data.test_bufs = &compress_test_bufs[i];
1990 int_data.buf_idx = &i;
1992 /* Generate compression and decompression
1993 * checksum of selected driver
1995 test_data.zlib_dir = ZLIB_NONE;
1996 ret = test_deflate_comp_decomp(&int_data, &test_data);
2005 rte_free(compress_xform);
2006 rte_free(decompress_xform);
/*
 * Negative test: the destination buffer is deliberately too small
 * (out_of_space flag set in test_data_params) and the operation is
 * expected to finish with OUT_OF_SPACE_TERMINATED status.  Runs with
 * linear buffers in both directions, and with SGL buffers when the
 * device supports SGL-in/SGL-out.
 */
2011 test_compressdev_out_of_space_buffer(void)
2013 struct comp_testsuite_params *ts_params = &testsuite_params;
2016 const struct rte_compressdev_capabilities *capab;
2018 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
2020 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2021 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2023 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2026 struct interim_data_params int_data = {
2027 &compress_test_bufs[0],
2030 &ts_params->def_comp_xform,
2031 &ts_params->def_decomp_xform,
2035 struct test_data_params test_data = {
2036 RTE_COMP_OP_STATELESS,
2037 RTE_COMP_OP_STATELESS,
2040 1, /* run out-of-space test */
2045 /* Compress with compressdev, decompress with Zlib */
2046 test_data.zlib_dir = ZLIB_DECOMPRESS;
2047 ret = test_deflate_comp_decomp(&int_data, &test_data);
2051 /* Compress with Zlib, decompress with compressdev */
2052 test_data.zlib_dir = ZLIB_COMPRESS;
2053 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Repeat with chained (SGL) buffers when supported */
2057 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2058 /* Compress with compressdev, decompress with Zlib */
2059 test_data.zlib_dir = ZLIB_DECOMPRESS;
2060 test_data.buff_type = SGL_BOTH;
2061 ret = test_deflate_comp_decomp(&int_data, &test_data);
2065 /* Compress with Zlib, decompress with compressdev */
2066 test_data.zlib_dir = ZLIB_COMPRESS;
2067 test_data.buff_type = SGL_BOTH;
2068 ret = test_deflate_comp_decomp(&int_data, &test_data);
/*
 * Stateless DEFLATE with dynamic Huffman on a large (BIG_DATA_TEST_SIZE)
 * pseudo-random buffer spanning multiple mbufs.  Requires dynamic-Huffman
 * and SGL-in/SGL-out support.  The default compress xform is temporarily
 * switched to dynamic Huffman and restored before returning.
 */
2080 test_compressdev_deflate_stateless_dynamic_big(void)
2082 struct comp_testsuite_params *ts_params = &testsuite_params;
2086 const struct rte_compressdev_capabilities *capab;
2087 char *test_buffer = NULL;
2089 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2090 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2092 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2095 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2098 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
2099 if (test_buffer == NULL) {
2101 "Can't allocate buffer for big-data\n");
2105 struct interim_data_params int_data = {
2106 (const char * const *)&test_buffer,
2109 &ts_params->def_comp_xform,
2110 &ts_params->def_decomp_xform,
2114 struct test_data_params test_data = {
2115 RTE_COMP_OP_STATELESS,
2116 RTE_COMP_OP_STATELESS,
2125 ts_params->def_comp_xform->compress.deflate.huffman =
2126 RTE_COMP_HUFFMAN_DYNAMIC;
2128 /* fill the buffer with data based on rand. data */
/* Fixed seed for reproducibility; "| 1" keeps every byte non-zero so the
 * buffer is a valid NUL-terminated string of length BIG_DATA_TEST_SIZE-1 */
2129 srand(BIG_DATA_TEST_SIZE);
2130 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2131 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2132 test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
2134 /* Compress with compressdev, decompress with Zlib */
2135 test_data.zlib_dir = ZLIB_DECOMPRESS;
2136 ret = test_deflate_comp_decomp(&int_data, &test_data);
2140 /* Compress with Zlib, decompress with compressdev */
2141 test_data.zlib_dir = ZLIB_COMPRESS;
2142 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Restore the shared default xform for subsequent tests */
2149 ts_params->def_comp_xform->compress.deflate.huffman =
2150 RTE_COMP_HUFFMAN_DEFAULT;
2151 rte_free(test_buffer);
/*
 * Stateful decompression test: compress with Zlib (stateless), then
 * decompress with the PMD using a stream object and multiple steps
 * (stateful).  Skipped unless RTE_COMP_FF_STATEFUL_DECOMPRESSION is
 * advertised; repeated with SGL buffers when SGL-in/SGL-out is supported.
 */
2156 test_compressdev_deflate_stateful_decomp(void)
2158 struct comp_testsuite_params *ts_params = &testsuite_params;
2161 const struct rte_compressdev_capabilities *capab;
2163 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2164 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2166 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2169 struct interim_data_params int_data = {
2170 &compress_test_bufs[0],
2173 &ts_params->def_comp_xform,
2174 &ts_params->def_decomp_xform,
2178 struct test_data_params test_data = {
2179 RTE_COMP_OP_STATELESS,
2180 RTE_COMP_OP_STATEFUL,
2189 /* Compress with Zlib, decompress with compressdev */
2190 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2195 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2196 /* Now test with SGL buffers */
2197 test_data.buff_type = SGL_BOTH;
2198 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/*
 * Stateful decompression with checksum verification.  Requires stateful
 * decompression plus at least one checksum capability.  For each supported
 * checksum type (CRC32, Adler32, combined), runs a linear-buffer pass and,
 * when SGL-in/SGL-out is supported, an SGL pass.  The combined checksum
 * uses ZLIB_NONE since Zlib cannot produce it.
 */
2211 test_compressdev_deflate_stateful_decomp_checksum(void)
2213 struct comp_testsuite_params *ts_params = &testsuite_params;
2216 const struct rte_compressdev_capabilities *capab;
2218 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2219 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2221 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2224 /* Check if driver supports any checksum */
2225 if (!(capab->comp_feature_flags &
2226 (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
2227 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
/* Private xform copies so the checksum field can be varied per sub-test */
2230 struct rte_comp_xform *compress_xform =
2231 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2232 if (compress_xform == NULL) {
2233 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2237 memcpy(compress_xform, ts_params->def_comp_xform,
2238 sizeof(struct rte_comp_xform));
2240 struct rte_comp_xform *decompress_xform =
2241 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2242 if (decompress_xform == NULL) {
2243 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2244 rte_free(compress_xform);
2248 memcpy(decompress_xform, ts_params->def_decomp_xform,
2249 sizeof(struct rte_comp_xform));
2251 struct interim_data_params int_data = {
2252 &compress_test_bufs[0],
2260 struct test_data_params test_data = {
2261 RTE_COMP_OP_STATELESS,
2262 RTE_COMP_OP_STATEFUL,
2271 /* Check if driver supports crc32 checksum and test */
2272 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
2273 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2274 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2275 /* Compress with Zlib, decompress with compressdev */
2276 test_data.buff_type = LB_BOTH;
2277 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2281 if (capab->comp_feature_flags &
2282 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2283 /* Now test with SGL buffers */
2284 test_data.buff_type = SGL_BOTH;
2285 if (test_deflate_comp_decomp(&int_data,
2293 /* Check if driver supports adler32 checksum and test */
2294 if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
2295 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2296 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2297 /* Compress with Zlib, decompress with compressdev */
2298 test_data.buff_type = LB_BOTH;
2299 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2303 if (capab->comp_feature_flags &
2304 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2305 /* Now test with SGL buffers */
2306 test_data.buff_type = SGL_BOTH;
2307 if (test_deflate_comp_decomp(&int_data,
2315 /* Check if driver supports combined crc and adler checksum and test */
2316 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
2317 compress_xform->compress.chksum =
2318 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2319 decompress_xform->decompress.chksum =
2320 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2321 /* Zlib doesn't support combined checksum */
2322 test_data.zlib_dir = ZLIB_NONE;
2323 /* Compress stateless, decompress stateful with compressdev */
2324 test_data.buff_type = LB_BOTH;
2325 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2329 if (capab->comp_feature_flags &
2330 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2331 /* Now test with SGL buffers */
2332 test_data.buff_type = SGL_BOTH;
2333 if (test_deflate_comp_decomp(&int_data,
2344 rte_free(compress_xform);
2345 rte_free(decompress_xform);
/*
 * Test-suite descriptor consumed by unit_test_suite_runner().  Every case
 * except the invalid-configuration one uses the generic per-test setup/
 * teardown pair; the suite-wide setup/teardown create and destroy the
 * shared mempools and default xforms.
 */
2349 static struct unit_test_suite compressdev_testsuite = {
2350 .suite_name = "compressdev unit test suite",
2351 .setup = testsuite_setup,
2352 .teardown = testsuite_teardown,
2353 .unit_test_cases = {
2354 TEST_CASE_ST(NULL, NULL,
2355 test_compressdev_invalid_configuration),
2356 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2357 test_compressdev_deflate_stateless_fixed),
2358 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2359 test_compressdev_deflate_stateless_dynamic),
2360 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2361 test_compressdev_deflate_stateless_dynamic_big),
2362 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2363 test_compressdev_deflate_stateless_multi_op),
2364 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2365 test_compressdev_deflate_stateless_multi_level),
2366 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2367 test_compressdev_deflate_stateless_multi_xform),
2368 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2369 test_compressdev_deflate_stateless_sgl),
2370 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2371 test_compressdev_deflate_stateless_checksum),
2372 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2373 test_compressdev_out_of_space_buffer),
2374 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2375 test_compressdev_deflate_stateful_decomp),
2376 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2377 test_compressdev_deflate_stateful_decomp_checksum),
2378 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point: run the whole compressdev suite via the generic runner. */
2383 test_compressdev(void)
2385 return unit_test_suite_runner(&compressdev_testsuite);
/* Expose the suite to the DPDK test app as "compressdev_autotest". */
2388 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);