1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
13 #include <rte_compressdev.h>
14 #include <rte_string_fns.h>
16 #include "test_compressdev_test_buffer.h"
/* Integer ceiling division: rounds a/b up without using floating point. */
19 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
/* Deflate defaults: window size in bits; zlib memLevel (see deflateInit2). */
21 #define DEFAULT_WINDOW_SIZE 15
22 #define DEFAULT_MEM_LEVEL 8
/* Dequeue retry policy: give up after MAX_DEQD_RETRIES attempts, sleeping
 * DEQUEUE_WAIT_TIME microseconds (10 ms) between attempts.
 */
23 #define MAX_DEQD_RETRIES 10
24 #define DEQUEUE_WAIT_TIME 10000
27 * 30% extra size for compressed data compared to original data,
28 * in case data size cannot be reduced and it is actually bigger
29 * due to the compress block headers
31 #define COMPRESS_BUF_SIZE_RATIO 1.3
/* Mempool sizing: large linear buffers, and small per-segment buffers
 * used to build chained (SGL) mbufs.
 */
32 #define NUM_LARGE_MBUFS 16
33 #define SMALL_SEG_SIZE 256
34 #define NUM_MAX_XFORMS 16
37 #define NUM_MAX_INFLIGHT_OPS 128
/* zlib framing constants: windowBits 31 = 15 + 16, which selects gzip
 * framing in zlib (used for the CRC32 checksum case); header/trailer
 * sizes are used to strip zlib/gzip framing from deflate output.
 */
40 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
41 #define ZLIB_HEADER_SIZE 2
42 #define ZLIB_TRAILER_SIZE 4
43 #define GZIP_HEADER_SIZE 10
44 #define GZIP_TRAILER_SIZE 8
/* Deliberately undersized destination buffer length, used to provoke
 * RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED in the out-of-space tests.
 */
46 #define OUT_OF_SPACE_BUF 1
/* Human-readable names for the Huffman encodings, indexed by
 * enum rte_comp_huffman; used only in the DEBUG log output below.
 * (Declaration specifiers for this array are elided in this view.)
 */
49 huffman_type_strings[] = {
50 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
51 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
52 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Buffer-layout variants tested (enum varied_buff — the enum header line
 * is elided in this view). LB = linear buffer, SGL = scatter-gather
 * (chained mbuf) list.
 */
63 LB_BOTH = 0, /* both input and output are linear*/
64 SGL_BOTH, /* both input and output are chained */
65 SGL_TO_LB, /* input buffer is chained */
66 LB_TO_SGL /* output buffer is chained */
/* Resources shared by the whole test suite, created in testsuite_setup()
 * and released in testsuite_teardown().
 */
73 struct comp_testsuite_params {
74 struct rte_mempool *large_mbuf_pool;	/* linear bufs sized for worst case */
75 struct rte_mempool *small_mbuf_pool;	/* SMALL_SEG_SIZE segs for SGL tests */
76 struct rte_mempool *op_pool;		/* rte_comp_op pool with priv_op_data */
77 struct rte_comp_xform *def_comp_xform;	/* default compress xform */
78 struct rte_comp_xform *def_decomp_xform; /* default decompress xform */
/* Per-call test inputs: the plaintext buffers and the xforms to apply.
 * Xforms are cycled over the buffers via (index % num_xforms).
 */
81 struct interim_data_params {
82 const char * const *test_bufs;
83 unsigned int num_bufs;
85 struct rte_comp_xform **compress_xforms;
86 struct rte_comp_xform **decompress_xforms;
87 unsigned int num_xforms;
/* Per-call test configuration: op type, buffer layout, which direction
 * (if any) is handled by zlib instead of the PMD, and whether the
 * out-of-space path is being exercised.
 */
90 struct test_data_params {
91 enum rte_comp_op_type state;
92 enum varied_buff buff_type;
93 enum zlib_direction zlib_dir;
94 unsigned int out_of_space;
/* Single global instance shared by all tests in this file. */
97 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Releases every resource created by testsuite_setup(). Before freeing,
 * logs an error if any mempool still has outstanding entries — that
 * indicates an mbuf/op leak in one of the tests.
 */
100 testsuite_teardown(void)
102 struct comp_testsuite_params *ts_params = &testsuite_params;
104 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
105 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
106 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
107 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
108 if (rte_mempool_in_use_count(ts_params->op_pool))
109 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() accept NULL, so partial setup is safe here. */
111 rte_mempool_free(ts_params->large_mbuf_pool);
112 rte_mempool_free(ts_params->small_mbuf_pool);
113 rte_mempool_free(ts_params->op_pool);
114 rte_free(ts_params->def_comp_xform);
115 rte_free(ts_params->def_decomp_xform);
/*
 * One-time suite setup: verifies a compressdev exists, sizes the large
 * mbuf pool from the biggest test buffer (plus COMPRESS_BUF_SIZE_RATIO
 * headroom for incompressible data), creates the small-segment pool for
 * SGL tests and the op pool, and allocates/initializes the default
 * compress and decompress xforms. On failure, falls through to
 * testsuite_teardown() to release whatever was created.
 */
119 testsuite_setup(void)
121 struct comp_testsuite_params *ts_params = &testsuite_params;
122 uint32_t max_buf_size = 0;
125 if (rte_compressdev_count() == 0) {
126 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
130 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
131 rte_compressdev_name_get(0));
/* Largest plaintext, including its NUL terminator. */
133 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
134 max_buf_size = RTE_MAX(max_buf_size,
135 strlen(compress_test_bufs[i]) + 1);
138 * Buffers to be used in compression and decompression.
139 * Since decompressed data might be larger than
140 * compressed data (due to block header),
141 * buffers should be big enough for both cases.
143 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
144 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
147 max_buf_size + RTE_PKTMBUF_HEADROOM,
149 if (ts_params->large_mbuf_pool == NULL) {
150 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
154 /* Create mempool with smaller buffers for SGL testing */
155 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
156 NUM_LARGE_MBUFS * MAX_SEGS,
158 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
160 if (ts_params->small_mbuf_pool == NULL) {
161 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* Op pool reserves sizeof(struct priv_op_data) of private area per op,
 * used later to track each op's original submission index.
 */
165 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
166 0, sizeof(struct priv_op_data),
168 if (ts_params->op_pool == NULL) {
169 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
173 ts_params->def_comp_xform =
174 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
175 if (ts_params->def_comp_xform == NULL) {
177 "Default compress xform could not be created\n");
180 ts_params->def_decomp_xform =
181 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
182 if (ts_params->def_decomp_xform == NULL) {
184 "Default decompress xform could not be created\n");
188 /* Initializes default values for compress/decompress xforms */
189 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing ',' below is the comma operator, not ';'.
 * Harmless (next statement becomes the right operand) but should be ';'.
 */
190 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
191 ts_params->def_comp_xform->compress.deflate.huffman =
192 RTE_COMP_HUFFMAN_DEFAULT;
193 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
194 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
195 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
197 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same ',' instead of ';' quirk as the compress xform above. */
198 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
199 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
200 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: release anything allocated so far. */
205 testsuite_teardown();
/*
 * Per-test setup: configures device 0 with a single queue pair (and
 * NUM_MAX_XFORMS private xforms), sets up queue pair 0 with room for
 * NUM_MAX_INFLIGHT_OPS inflight ops, and starts the device.
 */
211 generic_ut_setup(void)
213 /* Configure compressdev (one device, one queue pair) */
214 struct rte_compressdev_config config = {
215 .socket_id = rte_socket_id(),
217 .max_nb_priv_xforms = NUM_MAX_XFORMS,
221 if (rte_compressdev_configure(0, &config) < 0) {
222 RTE_LOG(ERR, USER1, "Device configuration failed\n");
226 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
227 rte_socket_id()) < 0) {
228 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
232 if (rte_compressdev_start(0) < 0) {
233 RTE_LOG(ERR, USER1, "Device could not be started\n");
/*
 * Per-test teardown: stops and closes device 0, logging (but not
 * propagating) a close failure.
 */
241 generic_ut_teardown(void)
243 rte_compressdev_stop(0);
244 if (rte_compressdev_close(0) < 0)
245 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative-path test: verifies the device rejects (a) a configuration
 * with zero queue pairs, (b) more queue pairs than the device reports
 * supporting (only if it reports a maximum), and (c) a queue pair setup
 * attempted before any queue pairs were configured.
 */
249 test_compressdev_invalid_configuration(void)
251 struct rte_compressdev_config invalid_config;
252 struct rte_compressdev_config valid_config = {
253 .socket_id = rte_socket_id(),
255 .max_nb_priv_xforms = NUM_MAX_XFORMS,
258 struct rte_compressdev_info dev_info;
260 /* Invalid configuration with 0 queue pairs */
261 memcpy(&invalid_config, &valid_config,
262 sizeof(struct rte_compressdev_config));
263 invalid_config.nb_queue_pairs = 0;
265 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
266 "Device configuration was successful "
267 "with no queue pairs (invalid)\n");
270 * Invalid configuration with too many queue pairs
271 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit reported" — skip this case then. */
273 rte_compressdev_info_get(0, &dev_info);
274 if (dev_info.max_nb_queue_pairs != 0) {
275 memcpy(&invalid_config, &valid_config,
276 sizeof(struct rte_compressdev_config));
277 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
279 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
280 "Device configuration was successful "
281 "with too many queue pairs (invalid)\n");
284 /* Invalid queue pair setup, with no number of queue pairs set */
285 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
286 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
287 "Queue pair setup was successful "
288 "with no queue pairs set (invalid)\n");
/*
 * Compares two buffers by length and then content, logging which check
 * failed. Used to validate decompressed output against the original
 * plaintext. (Return statements are elided in this view; presumably
 * negative on mismatch, 0 on match — confirm against the full file.)
 */
294 compare_buffers(const char *buffer1, uint32_t buffer1_len,
295 const char *buffer2, uint32_t buffer2_len)
297 if (buffer1_len != buffer2_len) {
298 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
302 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
303 RTE_LOG(ERR, USER1, "Buffers are different\n");
/*
 * Translates an rte_comp_flush_flag into the corresponding zlib flush
 * value for deflate()/inflate(). (The per-case return values are elided
 * in this view.)
 */
311 * Maps compressdev and Zlib flush flags
314 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
317 case RTE_COMP_FLUSH_NONE:
319 case RTE_COMP_FLUSH_SYNC:
321 case RTE_COMP_FLUSH_FULL:
323 case RTE_COMP_FLUSH_FINAL:
326 * There should be only the values above,
327 * so this should never happen
/*
 * Reference compression path: compresses op->m_src into op->m_dst with
 * zlib's deflate(), mimicking what a compressdev PMD would do for this
 * op. Chained (multi-segment) mbufs are flattened into temporary linear
 * buffers for zlib, then the output is scattered back into the SGL.
 * On success fills op->consumed/produced/status/output_chksum; any
 * zlib/gzip framing is stripped so only raw deflate data remains in
 * m_dst (header/trailer removed via rte_pktmbuf_adj()/trim()).
 */
335 compress_zlib(struct rte_comp_op *op,
336 const struct rte_comp_xform *xform, int mem_level)
340 int strategy, window_bits, comp_level;
341 int ret = TEST_FAILED;
342 uint8_t *single_src_buf = NULL;
343 uint8_t *single_dst_buf = NULL;
345 /* initialize zlib stream */
346 stream.zalloc = Z_NULL;
347 stream.zfree = Z_NULL;
348 stream.opaque = Z_NULL;
350 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
353 strategy = Z_DEFAULT_STRATEGY;
356 * Window bits is the base two logarithm of the window size (in bytes).
357 * When doing raw DEFLATE, this number will be negative.
/* Negative windowBits = raw deflate; ADLER32 keeps zlib framing
 * (value elided here); CRC32 uses 31 = 15 + 16 to select gzip framing.
 */
359 window_bits = -(xform->compress.window_size);
360 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
362 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
363 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
365 comp_level = xform->compress.level;
367 if (comp_level != RTE_COMP_LEVEL_NONE)
368 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
369 window_bits, mem_level, strategy);
371 ret = deflateInit(&stream, Z_NO_COMPRESSION);
374 printf("Zlib deflate could not be initialized\n");
378 /* Assuming stateless operation */
/* Multi-segment source: linearize into a temp buffer for zlib. */
380 if (op->m_src->nb_segs > 1) {
381 single_src_buf = rte_malloc(NULL,
382 rte_pktmbuf_pkt_len(op->m_src), 0);
383 if (single_src_buf == NULL) {
384 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
388 if (rte_pktmbuf_read(op->m_src, op->src.offset,
389 rte_pktmbuf_pkt_len(op->m_src) -
391 single_src_buf) == NULL) {
393 "Buffer could not be read entirely\n");
397 stream.avail_in = op->src.length;
398 stream.next_in = single_src_buf;
401 stream.avail_in = op->src.length;
402 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Multi-segment destination: deflate into a temp buffer, scatter later. */
406 if (op->m_dst->nb_segs > 1) {
408 single_dst_buf = rte_malloc(NULL,
409 rte_pktmbuf_pkt_len(op->m_dst), 0);
410 if (single_dst_buf == NULL) {
412 "Buffer could not be allocated\n");
416 stream.avail_out = op->m_dst->pkt_len;
417 stream.next_out = single_dst_buf;
419 } else {/* linear output */
420 stream.avail_out = op->m_dst->data_len;
421 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
425 /* Stateless operation, all buffer will be compressed in one go */
426 zlib_flush = map_zlib_flush_flag(op->flush_flag);
427 ret = deflate(&stream, zlib_flush);
429 if (stream.avail_in != 0) {
430 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
434 if (ret != Z_STREAM_END)
437 /* Copy data to destination SGL */
438 if (op->m_dst->nb_segs > 1) {
439 uint32_t remaining_data = stream.total_out;
440 uint8_t *src_data = single_dst_buf;
441 struct rte_mbuf *dst_buf = op->m_dst;
443 while (remaining_data > 0) {
444 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
445 uint8_t *, op->dst.offset);
/* Last (partial) segment: copy the tail and stop. */
447 if (remaining_data < dst_buf->data_len) {
448 memcpy(dst_data, src_data, remaining_data);
451 memcpy(dst_data, src_data, dst_buf->data_len);
452 remaining_data -= dst_buf->data_len;
453 src_data += dst_buf->data_len;
454 dst_buf = dst_buf->next;
459 op->consumed = stream.total_in;
/* Strip zlib (ADLER32) or gzip (CRC32) framing so m_dst holds raw
 * deflate data; produced excludes header+trailer accordingly.
 */
460 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
461 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
462 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
463 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
465 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
466 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
467 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
468 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
471 op->produced = stream.total_out;
473 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* zlib exposes the running checksum (adler32 or crc32) in stream.adler. */
474 op->output_chksum = stream.adler;
476 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so this is safe on all paths. */
481 rte_free(single_src_buf);
482 rte_free(single_dst_buf);
/*
 * Reference decompression path: inflates op->m_src into op->m_dst with
 * zlib, mirroring compress_zlib(). For multi-segment sources both a
 * linear source copy and a linear destination buffer are used, and the
 * result is scattered back into the destination mbuf chain. Fills
 * op->consumed/produced/status on success.
 */
488 decompress_zlib(struct rte_comp_op *op,
489 const struct rte_comp_xform *xform)
494 int ret = TEST_FAILED;
495 uint8_t *single_src_buf = NULL;
496 uint8_t *single_dst_buf = NULL;
498 /* initialize zlib stream */
499 stream.zalloc = Z_NULL;
500 stream.zfree = Z_NULL;
501 stream.opaque = Z_NULL;
504 * Window bits is the base two logarithm of the window size (in bytes).
505 * When doing raw DEFLATE, this number will be negative.
507 window_bits = -(xform->decompress.window_size);
508 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate init path. */
511 printf("Zlib deflate could not be initialized\n");
515 /* Assuming stateless operation */
/* Multi-segment source: linearize input and use a linear scratch output. */
517 if (op->m_src->nb_segs > 1) {
518 single_src_buf = rte_malloc(NULL,
519 rte_pktmbuf_pkt_len(op->m_src), 0);
520 if (single_src_buf == NULL) {
521 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
524 single_dst_buf = rte_malloc(NULL,
525 rte_pktmbuf_pkt_len(op->m_dst), 0);
526 if (single_dst_buf == NULL) {
527 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
530 if (rte_pktmbuf_read(op->m_src, 0,
531 rte_pktmbuf_pkt_len(op->m_src),
532 single_src_buf) == NULL) {
534 "Buffer could not be read entirely\n");
538 stream.avail_in = op->src.length;
539 stream.next_in = single_src_buf;
540 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
541 stream.next_out = single_dst_buf;
544 stream.avail_in = op->src.length;
545 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
546 stream.avail_out = op->m_dst->data_len;
547 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
550 /* Stateless operation, all buffer will be compressed in one go */
551 zlib_flush = map_zlib_flush_flag(op->flush_flag);
552 ret = inflate(&stream, zlib_flush);
554 if (stream.avail_in != 0) {
555 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
559 if (ret != Z_STREAM_END)
/* Scatter the linear scratch output back into the destination chain.
 * NOTE(review): condition checks m_src->nb_segs (matching the branch
 * that allocated single_dst_buf above) even though it writes to m_dst.
 */
562 if (op->m_src->nb_segs > 1) {
563 uint32_t remaining_data = stream.total_out;
564 uint8_t *src_data = single_dst_buf;
565 struct rte_mbuf *dst_buf = op->m_dst;
567 while (remaining_data > 0) {
568 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
571 if (remaining_data < dst_buf->data_len) {
572 memcpy(dst_data, src_data, remaining_data);
575 memcpy(dst_data, src_data, dst_buf->data_len);
576 remaining_data -= dst_buf->data_len;
577 src_data += dst_buf->data_len;
578 dst_buf = dst_buf->next;
583 op->consumed = stream.total_in;
584 op->produced = stream.total_out;
585 op->status = RTE_COMP_OP_STATUS_SUCCESS;
587 inflateReset(&stream);
/*
 * Builds a chained (SGL) mbuf of total_data_size bytes starting from
 * head_buf: appends up to SMALL_SEG_SIZE to the head, then allocates
 * and chains additional segments from small_mbuf_pool (or from
 * large_mbuf_pool for an oversized final segment when the segment count
 * is capped by limit_segs_in_sgl). If test_buf is non-NULL its bytes
 * are copied across the segments; if NULL, space is only reserved.
 */
597 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
598 uint32_t total_data_size,
599 struct rte_mempool *small_mbuf_pool,
600 struct rte_mempool *large_mbuf_pool,
601 uint8_t limit_segs_in_sgl)
603 uint32_t remaining_data = total_data_size;
604 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
605 struct rte_mempool *pool;
606 struct rte_mbuf *next_seg;
609 const char *data_ptr = test_buf;
/* Cap segment count; "- 1" reserves one slot for the head segment
 * (num_remaining_segs counts segments after the head from here on).
 */
613 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
614 num_remaining_segs = limit_segs_in_sgl - 1;
617 * Allocate data in the first segment (header) and
618 * copy data if test buffer is provided
620 if (remaining_data < SMALL_SEG_SIZE)
621 data_size = remaining_data;
623 data_size = SMALL_SEG_SIZE;
624 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
625 if (buf_ptr == NULL) {
627 "Not enough space in the 1st buffer\n");
631 if (data_ptr != NULL) {
632 /* Copy characters without NULL terminator */
633 strncpy(buf_ptr, data_ptr, data_size);
634 data_ptr += data_size;
636 remaining_data -= data_size;
637 num_remaining_segs--;
640 * Allocate the rest of the segments,
641 * copy the rest of the data and chain the segments.
643 for (i = 0; i < num_remaining_segs; i++) {
645 if (i == (num_remaining_segs - 1)) {
/* Last segment takes all remaining data; if that exceeds a small
 * segment (because the count was capped), draw from the large pool.
 */
647 if (remaining_data > SMALL_SEG_SIZE)
648 pool = large_mbuf_pool;
650 pool = small_mbuf_pool;
651 data_size = remaining_data;
653 data_size = SMALL_SEG_SIZE;
654 pool = small_mbuf_pool;
657 next_seg = rte_pktmbuf_alloc(pool);
658 if (next_seg == NULL) {
660 "New segment could not be allocated "
661 "from the mempool\n");
664 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
665 if (buf_ptr == NULL) {
667 "Not enough space in the buffer\n");
668 rte_pktmbuf_free(next_seg);
671 if (data_ptr != NULL) {
672 /* Copy characters without NULL terminator */
673 strncpy(buf_ptr, data_ptr, data_size);
674 data_ptr += data_size;
676 remaining_data -= data_size;
678 ret = rte_pktmbuf_chain(head_buf, next_seg);
680 rte_pktmbuf_free(next_seg);
/* NOTE(review): message missing "be" — "could not be chained". */
682 "Segment could not chained\n");
/*
 * Core round-trip driver: compresses every buffer in int_data->test_bufs
 * and decompresses the result, then compares the output against the
 * original plaintext. Each direction runs either on the compressdev PMD
 * (device 0, queue pair 0) or directly through zlib, selected by
 * test_data->zlib_dir; buffer layout (linear vs SGL) comes from
 * test_data->buff_type, and test_data->out_of_space shrinks destination
 * buffers to OUT_OF_SPACE_BUF bytes to provoke and verify the
 * OUT_OF_SPACE_TERMINATED status. Ops carry their submission index in
 * per-op private data because dequeue order is not guaranteed.
 * (Return statements and some braces are elided in this view.)
 */
691 * Compresses and decompresses buffer with compressdev API and Zlib API
694 test_deflate_comp_decomp(const struct interim_data_params *int_data,
695 const struct test_data_params *test_data)
697 struct comp_testsuite_params *ts_params = &testsuite_params;
698 const char * const *test_bufs = int_data->test_bufs;
699 unsigned int num_bufs = int_data->num_bufs;
700 uint16_t *buf_idx = int_data->buf_idx;
701 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
702 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
703 unsigned int num_xforms = int_data->num_xforms;
704 enum rte_comp_op_type state = test_data->state;
705 unsigned int buff_type = test_data->buff_type;
706 unsigned int out_of_space = test_data->out_of_space;
707 enum zlib_direction zlib_dir = test_data->zlib_dir;
710 struct rte_mbuf *uncomp_bufs[num_bufs];
711 struct rte_mbuf *comp_bufs[num_bufs];
712 struct rte_comp_op *ops[num_bufs];
713 struct rte_comp_op *ops_processed[num_bufs];
714 void *priv_xforms[num_bufs];
715 uint16_t num_enqd, num_deqd, num_total_deqd;
716 uint16_t num_priv_xforms = 0;
717 unsigned int deqd_retries = 0;
718 struct priv_op_data *priv_data;
721 struct rte_mempool *buf_pool;
/* "oos_*" flags mark which direction runs on the PMD (and can hit the
 * out-of-space status): compression when zlib does NOT compress,
 * decompression when zlib does NOT decompress.
 */
723 /* Compressing with CompressDev */
724 unsigned int oos_zlib_decompress =
725 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
726 /* Decompressing with CompressDev */
727 unsigned int oos_zlib_compress =
728 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
729 const struct rte_compressdev_capabilities *capa =
730 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
731 char *contig_buf = NULL;
732 uint64_t compress_checksum[num_bufs];
734 /* Initialize all arrays to NULL */
735 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
736 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
737 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
738 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
739 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
741 if (buff_type == SGL_BOTH)
742 buf_pool = ts_params->small_mbuf_pool;
744 buf_pool = ts_params->large_mbuf_pool;
746 /* Prepare the source mbufs with the data */
747 ret = rte_pktmbuf_alloc_bulk(buf_pool,
748 uncomp_bufs, num_bufs);
751 "Source mbufs could not be allocated "
752 "from the mempool\n");
/* Chained source: build SGLs seeded with the plaintext. */
756 if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
757 for (i = 0; i < num_bufs; i++) {
758 data_size = strlen(test_bufs[i]) + 1;
759 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
761 ts_params->small_mbuf_pool,
762 ts_params->large_mbuf_pool,
767 for (i = 0; i < num_bufs; i++) {
768 data_size = strlen(test_bufs[i]) + 1;
769 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
770 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
774 /* Prepare the destination mbufs */
775 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
778 "Destination mbufs could not be allocated "
779 "from the mempool\n");
783 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
784 for (i = 0; i < num_bufs; i++) {
/* Out-of-space case: 1-byte dest so the PMD must fail with
 * OUT_OF_SPACE_TERMINATED; otherwise worst-case compressed size.
 */
785 if (out_of_space == 1 && oos_zlib_decompress)
786 data_size = OUT_OF_SPACE_BUF;
788 (data_size = strlen(test_bufs[i]) *
789 COMPRESS_BUF_SIZE_RATIO);
791 if (prepare_sgl_bufs(NULL, comp_bufs[i],
793 ts_params->small_mbuf_pool,
794 ts_params->large_mbuf_pool,
800 for (i = 0; i < num_bufs; i++) {
801 if (out_of_space == 1 && oos_zlib_decompress)
802 data_size = OUT_OF_SPACE_BUF;
804 (data_size = strlen(test_bufs[i]) *
805 COMPRESS_BUF_SIZE_RATIO);
807 rte_pktmbuf_append(comp_bufs[i], data_size);
811 /* Build the compression operations */
812 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
815 "Compress operations could not be allocated "
816 "from the mempool\n");
821 for (i = 0; i < num_bufs; i++) {
822 ops[i]->m_src = uncomp_bufs[i];
823 ops[i]->m_dst = comp_bufs[i];
824 ops[i]->src.offset = 0;
825 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
826 ops[i]->dst.offset = 0;
827 if (state == RTE_COMP_OP_STATELESS) {
828 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
831 "Stateful operations are not supported "
832 "in these tests yet\n");
835 ops[i]->input_chksum = 0;
837 * Store original operation index in private data,
838 * since ordering does not have to be maintained,
839 * when dequeueing from compressdev, so a comparison
840 * at the end of the test can be done.
842 priv_data = (struct priv_op_data *) (ops[i] + 1);
843 priv_data->orig_idx = i;
846 /* Compress data (either with Zlib API or compressdev API */
847 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
848 for (i = 0; i < num_bufs; i++) {
849 const struct rte_comp_xform *compress_xform =
850 compress_xforms[i % num_xforms];
851 ret = compress_zlib(ops[i], compress_xform,
/* zlib path is synchronous, so processed order == submit order. */
856 ops_processed[i] = ops[i];
859 /* Create compress private xform data */
860 for (i = 0; i < num_xforms; i++) {
861 ret = rte_compressdev_private_xform_create(0,
862 (const struct rte_comp_xform *)compress_xforms[i],
866 "Compression private xform "
867 "could not be created\n");
/* Shareable xforms: one per distinct xform, reused across ops;
 * otherwise every op needs its own private xform instance.
 */
873 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
874 /* Attach shareable private xform data to ops */
875 for (i = 0; i < num_bufs; i++)
876 ops[i]->private_xform = priv_xforms[i % num_xforms];
878 /* Create rest of the private xforms for the other ops */
879 for (i = num_xforms; i < num_bufs; i++) {
880 ret = rte_compressdev_private_xform_create(0,
881 compress_xforms[i % num_xforms],
885 "Compression private xform "
886 "could not be created\n");
892 /* Attach non shareable private xform data to ops */
893 for (i = 0; i < num_bufs; i++)
894 ops[i]->private_xform = priv_xforms[i];
897 /* Enqueue and dequeue all operations */
898 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
899 if (num_enqd < num_bufs) {
901 "The operations could not be enqueued\n");
908 * If retrying a dequeue call, wait for 10 ms to allow
909 * enough time to the driver to process the operations
911 if (deqd_retries != 0) {
913 * Avoid infinite loop if not all the
914 * operations get out of the device
916 if (deqd_retries == MAX_DEQD_RETRIES) {
918 "Not all operations could be "
922 usleep(DEQUEUE_WAIT_TIME);
924 num_deqd = rte_compressdev_dequeue_burst(0, 0,
925 &ops_processed[num_total_deqd], num_bufs);
926 num_total_deqd += num_deqd;
929 } while (num_total_deqd < num_enqd);
933 /* Free compress private xforms */
934 for (i = 0; i < num_priv_xforms; i++) {
935 rte_compressdev_private_xform_free(0, priv_xforms[i]);
936 priv_xforms[i] = NULL;
941 for (i = 0; i < num_bufs; i++) {
942 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
943 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
944 const struct rte_comp_compress_xform *compress_xform =
945 &compress_xforms[xform_idx]->compress;
946 enum rte_comp_huffman huffman_type =
947 compress_xform->deflate.huffman;
948 char engine[] = "zlib (directly, not PMD)";
/* NOTE(review): "!= A || != B" is always true, so engine is always
 * overwritten with "PMD" — likely meant && (or ==/||) to keep the
 * zlib label when zlib did the compression.
 */
949 if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
950 strlcpy(engine, "PMD", sizeof(engine));
952 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
953 " %u bytes (level = %d, huffman = %s)\n",
954 buf_idx[priv_data->orig_idx], engine,
955 ops_processed[i]->consumed, ops_processed[i]->produced,
956 compress_xform->level,
957 huffman_type_strings[huffman_type]);
958 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
959 ops_processed[i]->consumed == 0 ? 0 :
960 (float)ops_processed[i]->produced /
961 ops_processed[i]->consumed * 100);
/* NOTE(review): indexed by dequeue position i, not orig_idx — relies
 * on matching positions in the later decompression-stage comparison.
 */
962 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
963 compress_checksum[i] = ops_processed[i]->output_chksum;
968 * Check operation status and free source mbufs (destination mbuf and
969 * compress operation information is needed for the decompression stage)
971 for (i = 0; i < num_bufs; i++) {
972 if (out_of_space && oos_zlib_decompress) {
973 if (ops_processed[i]->status !=
974 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
978 "Operation without expected out of "
979 "space status error\n");
985 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
987 "Some operations were not successful\n");
990 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
991 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
992 uncomp_bufs[priv_data->orig_idx] = NULL;
/* Out-of-space compression test ends here (no decompression stage). */
995 if (out_of_space && oos_zlib_decompress) {
1000 /* Allocate buffers for decompressed data */
1001 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1004 "Destination mbufs could not be allocated "
1005 "from the mempool\n");
1009 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1010 for (i = 0; i < num_bufs; i++) {
1011 priv_data = (struct priv_op_data *)
1012 (ops_processed[i] + 1);
1013 if (out_of_space == 1 && oos_zlib_compress)
1014 data_size = OUT_OF_SPACE_BUF;
1017 strlen(test_bufs[priv_data->orig_idx]) + 1;
1019 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1021 ts_params->small_mbuf_pool,
1022 ts_params->large_mbuf_pool,
1028 for (i = 0; i < num_bufs; i++) {
1029 priv_data = (struct priv_op_data *)
1030 (ops_processed[i] + 1);
1031 if (out_of_space == 1 && oos_zlib_compress)
1032 data_size = OUT_OF_SPACE_BUF;
1035 strlen(test_bufs[priv_data->orig_idx]) + 1;
1037 rte_pktmbuf_append(uncomp_bufs[i], data_size);
1041 /* Build the decompression operations */
1042 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1045 "Decompress operations could not be allocated "
1046 "from the mempool\n");
1050 /* Source buffer is the compressed data from the previous operations */
1051 for (i = 0; i < num_bufs; i++) {
1052 ops[i]->m_src = ops_processed[i]->m_dst;
1053 ops[i]->m_dst = uncomp_bufs[i];
1054 ops[i]->src.offset = 0;
1056 * Set the length of the compressed data to the
1057 * number of bytes that were produced in the previous stage
1059 ops[i]->src.length = ops_processed[i]->produced;
1060 ops[i]->dst.offset = 0;
1061 if (state == RTE_COMP_OP_STATELESS) {
1062 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1065 "Stateful operations are not supported "
1066 "in these tests yet\n");
1069 ops[i]->input_chksum = 0;
1071 * Copy private data from previous operations,
1072 * to keep the pointer to the original buffer
1074 memcpy(ops[i] + 1, ops_processed[i] + 1,
1075 sizeof(struct priv_op_data));
1079 * Free the previous compress operations,
1080 * as they are not needed anymore
1082 rte_comp_op_bulk_free(ops_processed, num_bufs);
1084 /* Decompress data (either with Zlib API or compressdev API */
1085 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1086 for (i = 0; i < num_bufs; i++) {
1087 priv_data = (struct priv_op_data *)(ops[i] + 1);
1088 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1089 const struct rte_comp_xform *decompress_xform =
1090 decompress_xforms[xform_idx];
1092 ret = decompress_zlib(ops[i], decompress_xform);
1096 ops_processed[i] = ops[i];
1099 /* Create decompress private xform data */
1100 for (i = 0; i < num_xforms; i++) {
1101 ret = rte_compressdev_private_xform_create(0,
1102 (const struct rte_comp_xform *)decompress_xforms[i],
1106 "Decompression private xform "
1107 "could not be created\n");
1113 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1114 /* Attach shareable private xform data to ops */
1115 for (i = 0; i < num_bufs; i++) {
1116 priv_data = (struct priv_op_data *)(ops[i] + 1);
1117 uint16_t xform_idx = priv_data->orig_idx %
1119 ops[i]->private_xform = priv_xforms[xform_idx];
1122 /* Create rest of the private xforms for the other ops */
1123 for (i = num_xforms; i < num_bufs; i++) {
1124 ret = rte_compressdev_private_xform_create(0,
1125 decompress_xforms[i % num_xforms],
1129 "Decompression private xform "
1130 "could not be created\n");
1136 /* Attach non shareable private xform data to ops */
1137 for (i = 0; i < num_bufs; i++) {
1138 priv_data = (struct priv_op_data *)(ops[i] + 1);
1139 uint16_t xform_idx = priv_data->orig_idx;
1140 ops[i]->private_xform = priv_xforms[xform_idx];
1144 /* Enqueue and dequeue all operations */
1145 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1146 if (num_enqd < num_bufs) {
1148 "The operations could not be enqueued\n");
1155 * If retrying a dequeue call, wait for 10 ms to allow
1156 * enough time to the driver to process the operations
1158 if (deqd_retries != 0) {
1160 * Avoid infinite loop if not all the
1161 * operations get out of the device
1163 if (deqd_retries == MAX_DEQD_RETRIES) {
1165 "Not all operations could be "
1169 usleep(DEQUEUE_WAIT_TIME);
1171 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1172 &ops_processed[num_total_deqd], num_bufs);
1173 num_total_deqd += num_deqd;
1175 } while (num_total_deqd < num_enqd);
1180 for (i = 0; i < num_bufs; i++) {
1181 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1182 char engine[] = "zlib, (directly, no PMD)";
/* NOTE(review): same always-true "!= A || != B" condition as in the
 * compression-stage logging above — engine is always set to "pmd".
 */
1183 if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1184 strlcpy(engine, "pmd", sizeof(engine));
1185 RTE_LOG(DEBUG, USER1,
1186 "Buffer %u decompressed by %s from %u to %u bytes\n",
1187 buf_idx[priv_data->orig_idx], engine,
1188 ops_processed[i]->consumed, ops_processed[i]->produced);
1193 * Check operation status and free source mbuf (destination mbuf and
1194 * compress operation information is still needed)
1196 for (i = 0; i < num_bufs; i++) {
1197 if (out_of_space && oos_zlib_compress) {
1198 if (ops_processed[i]->status !=
1199 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1203 "Operation without expected out of "
1204 "space status error\n");
1210 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1212 "Some operations were not successful\n");
1215 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1216 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1217 comp_bufs[priv_data->orig_idx] = NULL;
/* Out-of-space decompression test ends here (no data comparison). */
1220 if (out_of_space && oos_zlib_compress) {
1226 * Compare the original stream with the decompressed stream
1227 * (in size and the data)
1229 for (i = 0; i < num_bufs; i++) {
1230 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1231 const char *buf1 = test_bufs[priv_data->orig_idx];
1233 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1234 if (contig_buf == NULL) {
1235 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
/* rte_pktmbuf_read() linearizes a possibly-chained m_dst into contig_buf. */
1240 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1241 ops_processed[i]->produced, contig_buf);
1242 if (compare_buffers(buf1, strlen(buf1) + 1,
1243 buf2, ops_processed[i]->produced) < 0)
1246 /* Test checksums */
1247 if (compress_xforms[0]->compress.chksum !=
1248 RTE_COMP_CHECKSUM_NONE) {
1249 if (ops_processed[i]->output_chksum !=
1250 compress_checksum[i]) {
1251 RTE_LOG(ERR, USER1, "The checksums differ\n"
1252 "Compression Checksum: %" PRIu64 "\tDecompression "
1253 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1254 ops_processed[i]->output_chksum);
1259 rte_free(contig_buf);
/* Common exit: free every remaining mbuf/op/xform; freeing NULL is a no-op. */
1266 /* Free resources */
1267 for (i = 0; i < num_bufs; i++) {
1268 rte_pktmbuf_free(uncomp_bufs[i]);
1269 rte_pktmbuf_free(comp_bufs[i]);
1270 rte_comp_op_free(ops[i]);
1271 rte_comp_op_free(ops_processed[i]);
1273 for (i = 0; i < num_priv_xforms; i++) {
1274 if (priv_xforms[i] != NULL)
1275 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1277 rte_free(contig_buf);
/*
 * Stateless deflate test with FIXED Huffman encoding: skipped if the
 * device does not advertise RTE_COMP_FF_HUFFMAN_FIXED. For each test
 * buffer, runs the round trip twice — once compressing on the PMD and
 * decompressing with zlib, once the reverse — so each direction of the
 * PMD is validated against the zlib reference.
 */
1283 test_compressdev_deflate_stateless_fixed(void)
1285 struct comp_testsuite_params *ts_params = &testsuite_params;
1288 const struct rte_compressdev_capabilities *capab;
1290 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1291 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1293 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1296 struct rte_comp_xform *compress_xform =
1297 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1299 if (compress_xform == NULL) {
1301 "Compress xform could not be created\n");
/* Clone the default compress xform, then force fixed Huffman. */
1306 memcpy(compress_xform, ts_params->def_comp_xform,
1307 sizeof(struct rte_comp_xform));
1308 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1310 struct interim_data_params int_data = {
1315 &ts_params->def_decomp_xform,
1319 struct test_data_params test_data = {
1320 RTE_COMP_OP_STATELESS,
/* One buffer at a time; buf_idx tracks the index for log messages. */
1326 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1327 int_data.test_bufs = &compress_test_bufs[i];
1328 int_data.buf_idx = &i;
1330 /* Compress with compressdev, decompress with Zlib */
1331 test_data.zlib_dir = ZLIB_DECOMPRESS;
1332 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1337 /* Compress with Zlib, decompress with compressdev */
1338 test_data.zlib_dir = ZLIB_COMPRESS;
1339 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1348 rte_free(compress_xform);
/*
 * Stateless DEFLATE test using DYNAMIC Huffman coding.
 * Mirrors the fixed-Huffman test: PMD-compress/Zlib-decompress and the
 * reverse, per test buffer. Skipped without RTE_COMP_FF_HUFFMAN_DYNAMIC.
 * NOTE(review): unlike the _fixed sibling, compress_xform is allocated
 * BEFORE the capability check; if the unsupported-feature path (early
 * return, not visible in this excerpt) is taken, the allocation appears
 * to leak — confirm against the full file.
 */
1353 test_compressdev_deflate_stateless_dynamic(void)
1355 struct comp_testsuite_params *ts_params = &testsuite_params;
1358 struct rte_comp_xform *compress_xform =
1359 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1361 const struct rte_compressdev_capabilities *capab;
1363 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1364 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1366 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1369 if (compress_xform == NULL) {
1371 "Compress xform could not be created\n");
/* Clone the default compress xform and force dynamic Huffman. */
1376 memcpy(compress_xform, ts_params->def_comp_xform,
1377 sizeof(struct rte_comp_xform));
1378 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1380 struct interim_data_params int_data = {
1385 &ts_params->def_decomp_xform,
1389 struct test_data_params test_data = {
1390 RTE_COMP_OP_STATELESS,
1396 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1397 int_data.test_bufs = &compress_test_bufs[i];
1398 int_data.buf_idx = &i;
1400 /* Compress with compressdev, decompress with Zlib */
1401 test_data.zlib_dir = ZLIB_DECOMPRESS;
1402 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1407 /* Compress with Zlib, decompress with compressdev */
1408 test_data.zlib_dir = ZLIB_COMPRESS;
1409 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Common exit: release the locally built xform. */
1418 rte_free(compress_xform);
/*
 * Stateless DEFLATE test submitting ALL test buffers in one batch
 * (one operation per buffer) using the suite's default xforms.
 * Both interoperability directions are exercised via test_data.zlib_dir.
 * Uses a VLA indexed per buffer; buf_idx initialization body is outside
 * this excerpt's visible lines.
 */
1423 test_compressdev_deflate_stateless_multi_op(void)
1425 struct comp_testsuite_params *ts_params = &testsuite_params;
1426 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1427 uint16_t buf_idx[num_bufs];
1430 for (i = 0; i < num_bufs; i++)
1433 struct interim_data_params int_data = {
1437 &ts_params->def_comp_xform,
1438 &ts_params->def_decomp_xform,
1442 struct test_data_params test_data = {
1443 RTE_COMP_OP_STATELESS,
1449 /* Compress with compressdev, decompress with Zlib */
1450 test_data.zlib_dir = ZLIB_DECOMPRESS;
1451 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1454 /* Compress with Zlib, decompress with compressdev */
1455 test_data.zlib_dir = ZLIB_COMPRESS;
1456 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1459 return TEST_SUCCESS;
/*
 * Stateless DEFLATE test sweeping every compression level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX for each test buffer.
 * Only the PMD-compress / Zlib-decompress direction is run here.
 */
1463 test_compressdev_deflate_stateless_multi_level(void)
1465 struct comp_testsuite_params *ts_params = &testsuite_params;
1469 struct rte_comp_xform *compress_xform =
1470 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1472 if (compress_xform == NULL) {
1474 "Compress xform could not be created\n");
/* Clone the default compress xform; only .level is varied per iteration. */
1479 memcpy(compress_xform, ts_params->def_comp_xform,
1480 sizeof(struct rte_comp_xform));
1482 struct interim_data_params int_data = {
1487 &ts_params->def_decomp_xform,
1491 struct test_data_params test_data = {
1492 RTE_COMP_OP_STATELESS,
1498 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1499 int_data.test_bufs = &compress_test_bufs[i];
1500 int_data.buf_idx = &i;
/* Inner sweep over all levels supported by the API range. */
1502 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1504 compress_xform->compress.level = level;
1505 /* Compress with compressdev, decompress with Zlib */
1506 test_data.zlib_dir = ZLIB_DECOMPRESS;
1507 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Common exit: release the locally built xform. */
1517 rte_free(compress_xform);
/* Number of distinct compress/decompress xform pairs exercised below. */
1521 #define NUM_XFORMS 3
/*
 * Stateless DEFLATE test with multiple xforms in flight at once:
 * NUM_XFORMS compress xforms (each presumably at a different level —
 * the level increment falls outside the visible lines; confirm) paired
 * with NUM_XFORMS decompress xforms, all applied to the SAME input
 * buffer (compress_test_bufs[0]).
 */
1523 test_compressdev_deflate_stateless_multi_xform(void)
1525 struct comp_testsuite_params *ts_params = &testsuite_params;
1526 uint16_t num_bufs = NUM_XFORMS;
1527 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1528 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1529 const char *test_buffers[NUM_XFORMS];
1531 unsigned int level = RTE_COMP_LEVEL_MIN;
1532 uint16_t buf_idx[num_bufs];
1536 /* Create multiple xforms with various levels */
1537 for (i = 0; i < NUM_XFORMS; i++) {
1538 compress_xforms[i] = rte_malloc(NULL,
1539 sizeof(struct rte_comp_xform), 0);
1540 if (compress_xforms[i] == NULL) {
1542 "Compress xform could not be created\n");
1547 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1548 sizeof(struct rte_comp_xform));
1549 compress_xforms[i]->compress.level = level;
1552 decompress_xforms[i] = rte_malloc(NULL,
1553 sizeof(struct rte_comp_xform), 0);
1554 if (decompress_xforms[i] == NULL) {
1556 "Decompress xform could not be created\n");
1561 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1562 sizeof(struct rte_comp_xform));
1565 for (i = 0; i < NUM_XFORMS; i++) {
1567 /* Use the same buffer in all sessions */
1568 test_buffers[i] = compress_test_bufs[0];
1571 struct interim_data_params int_data = {
1580 struct test_data_params test_data = {
1581 RTE_COMP_OP_STATELESS,
1587 /* Compress with compressdev, decompress with Zlib */
1588 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Cleanup: free every xform allocated above (rte_free(NULL) is a no-op,
 * so partially initialized arrays are safe to walk). */
1595 for (i = 0; i < NUM_XFORMS; i++) {
1596 rte_free(compress_xforms[i]);
1597 rte_free(decompress_xforms[i]);
/*
 * Stateless DEFLATE test over scatter-gather (chained-mbuf) buffers.
 * Requires RTE_COMP_FF_OOP_SGL_IN_SGL_OUT; additionally exercises the
 * SGL->linear (SGL_TO_LB) and linear->SGL (LB_TO_SGL) buffer layouts
 * when the corresponding capability flags are present. The initial
 * test_data buffer type (presumably SGL_BOTH) is set in a line outside
 * this excerpt's visible span.
 */
1604 test_compressdev_deflate_stateless_sgl(void)
1606 struct comp_testsuite_params *ts_params = &testsuite_params;
1608 const struct rte_compressdev_capabilities *capab;
1610 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1611 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1613 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1616 struct interim_data_params int_data = {
1620 &ts_params->def_comp_xform,
1621 &ts_params->def_decomp_xform,
1625 struct test_data_params test_data = {
1626 RTE_COMP_OP_STATELESS,
1632 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1633 int_data.test_bufs = &compress_test_bufs[i];
1634 int_data.buf_idx = &i;
1636 /* Compress with compressdev, decompress with Zlib */
1637 test_data.zlib_dir = ZLIB_DECOMPRESS;
1638 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1641 /* Compress with Zlib, decompress with compressdev */
1642 test_data.zlib_dir = ZLIB_COMPRESS;
1643 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
/* Optional pass: chained input, linear output. */
1646 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1647 /* Compress with compressdev, decompress with Zlib */
1648 test_data.zlib_dir = ZLIB_DECOMPRESS;
1649 test_data.buff_type = SGL_TO_LB;
1650 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1653 /* Compress with Zlib, decompress with compressdev */
1654 test_data.zlib_dir = ZLIB_COMPRESS;
1655 test_data.buff_type = SGL_TO_LB;
1656 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
/* Optional pass: linear input, chained output. */
1660 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1661 /* Compress with compressdev, decompress with Zlib */
1662 test_data.zlib_dir = ZLIB_DECOMPRESS;
1663 test_data.buff_type = LB_TO_SGL;
1664 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1667 /* Compress with Zlib, decompress with compressdev */
1668 test_data.zlib_dir = ZLIB_COMPRESS;
1669 test_data.buff_type = LB_TO_SGL;
1670 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1677 return TEST_SUCCESS;
/*
 * Stateless DEFLATE test validating checksum generation.
 * Runs once per checksum type the device advertises: CRC32, ADLER32,
 * and the combined CRC32+ADLER32. For CRC32 and ADLER32 the PMD result
 * is also cross-checked against a Zlib-generated checksum
 * (zlib_dir == ZLIB_COMPRESS pass); the combined mode is only checked
 * PMD-vs-PMD (ZLIB_NONE), since Zlib cannot produce the combined value.
 * Skipped entirely when no checksum capability is advertised.
 */
1682 test_compressdev_deflate_stateless_checksum(void)
1684 struct comp_testsuite_params *ts_params = &testsuite_params;
1687 const struct rte_compressdev_capabilities *capab;
1689 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1690 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1692 /* Check if driver supports any checksum */
1693 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1694 (capab->comp_feature_flags &
1695 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1696 (capab->comp_feature_flags &
1697 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
/* Local copies of both default xforms; .chksum is rewritten per section. */
1700 struct rte_comp_xform *compress_xform =
1701 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1702 if (compress_xform == NULL) {
1703 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1708 memcpy(compress_xform, ts_params->def_comp_xform,
1709 sizeof(struct rte_comp_xform));
1711 struct rte_comp_xform *decompress_xform =
1712 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1713 if (decompress_xform == NULL) {
1714 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
/* Free the first allocation before bailing out on the second failure. */
1715 rte_free(compress_xform);
1720 memcpy(decompress_xform, ts_params->def_decomp_xform,
1721 sizeof(struct rte_comp_xform));
1723 struct interim_data_params int_data = {
1732 struct test_data_params test_data = {
1733 RTE_COMP_OP_STATELESS,
1739 /* Check if driver supports crc32 checksum and test */
1740 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1741 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1742 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1744 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1745 /* Compress with compressdev, decompress with Zlib */
1746 int_data.test_bufs = &compress_test_bufs[i];
1747 int_data.buf_idx = &i;
1749 /* Generate zlib checksum and test against selected
1750 * drivers decompression checksum
1752 test_data.zlib_dir = ZLIB_COMPRESS;
1753 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1758 /* Generate compression and decompression
1759 * checksum of selected driver
1761 test_data.zlib_dir = ZLIB_NONE;
1762 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1769 /* Check if driver supports adler32 checksum and test */
1770 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1771 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1772 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1774 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1775 int_data.test_bufs = &compress_test_bufs[i];
1776 int_data.buf_idx = &i;
1778 /* Generate zlib checksum and test against selected
1779 * drivers decompression checksum
1781 test_data.zlib_dir = ZLIB_COMPRESS;
1782 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1786 /* Generate compression and decompression
1787 * checksum of selected driver
1789 test_data.zlib_dir = ZLIB_NONE;
1790 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1797 /* Check if driver supports combined crc and adler checksum and test */
1798 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1799 compress_xform->compress.chksum =
1800 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1801 decompress_xform->decompress.chksum =
1802 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1804 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1805 int_data.test_bufs = &compress_test_bufs[i];
1806 int_data.buf_idx = &i;
1808 /* Generate compression and decompression
1809 * checksum of selected driver
1811 test_data.zlib_dir = ZLIB_NONE;
1812 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Common exit: release both local xform copies. */
1822 rte_free(compress_xform);
1823 rte_free(decompress_xform);
/*
 * Negative test: drive the PMD with a destination buffer that is too
 * small (the out_of_space field of test_data, set outside the visible
 * span — presumably to OUT_OF_SPACE_BUF) and expect graceful errors,
 * not crashes. Runs both interop directions on linear buffers, and again
 * on chained (SGL_BOTH) buffers when the device supports SGL in/out.
 * NOTE(review): compress_xform is allocated here but int_data uses the
 * suite's default xforms; the local allocation appears unused apart from
 * the final rte_free — confirm against the full file.
 */
1828 test_compressdev_out_of_space_buffer(void)
1830 struct comp_testsuite_params *ts_params = &testsuite_params;
1833 const struct rte_compressdev_capabilities *capab;
1835 RTE_LOG(INFO, USER1, "This is a negative test errors are expected\n");
1837 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1838 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1840 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1843 struct rte_comp_xform *compress_xform =
1844 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1846 if (compress_xform == NULL) {
1848 "Compress xform could not be created\n");
1853 struct interim_data_params int_data = {
1854 &compress_test_bufs[0],
1857 &ts_params->def_comp_xform,
1858 &ts_params->def_decomp_xform,
1862 struct test_data_params test_data = {
1863 RTE_COMP_OP_STATELESS,
1868 /* Compress with compressdev, decompress with Zlib */
1869 test_data.zlib_dir = ZLIB_DECOMPRESS;
1870 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1875 /* Compress with Zlib, decompress with compressdev */
1876 test_data.zlib_dir = ZLIB_COMPRESS;
1877 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Repeat the negative scenario on chained mbufs when supported. */
1882 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1883 /* Compress with compressdev, decompress with Zlib */
1884 test_data.zlib_dir = ZLIB_DECOMPRESS;
1885 test_data.buff_type = SGL_BOTH;
1886 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1891 /* Compress with Zlib, decompress with compressdev */
1892 test_data.zlib_dir = ZLIB_COMPRESS;
1893 test_data.buff_type = SGL_BOTH;
1894 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Common exit: release the locally allocated xform. */
1903 rte_free(compress_xform);
/*
 * Test-suite table consumed by the DPDK unit-test runner: suite-wide
 * setup/teardown plus one TEST_CASE_ST entry per test above. All cases
 * except the invalid-configuration one share the generic per-test
 * setup/teardown pair.
 */
1908 static struct unit_test_suite compressdev_testsuite = {
1909 .suite_name = "compressdev unit test suite",
1910 .setup = testsuite_setup,
1911 .teardown = testsuite_teardown,
1912 .unit_test_cases = {
1913 TEST_CASE_ST(NULL, NULL,
1914 test_compressdev_invalid_configuration),
1915 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1916 test_compressdev_deflate_stateless_fixed),
1917 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1918 test_compressdev_deflate_stateless_dynamic),
1919 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1920 test_compressdev_deflate_stateless_multi_op),
1921 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1922 test_compressdev_deflate_stateless_multi_level),
1923 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1924 test_compressdev_deflate_stateless_multi_xform),
1925 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1926 test_compressdev_deflate_stateless_sgl),
1927 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1928 test_compressdev_deflate_stateless_checksum),
1929 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1930 test_compressdev_out_of_space_buffer),
1931 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point: run the whole compressdev suite via the generic runner. */
1936 test_compressdev(void)
1938 return unit_test_suite_runner(&compressdev_testsuite);
/* Exposes the suite as the "compressdev_autotest" test-app command. */
1941 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);