1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
17 #include "test_compressdev_test_buffer.h"
/* NOTE(review): this file is a numbered listing with elided lines; the
 * leading integers are the original file's line numbers and are preserved.
 */
/* Ceiling integer division; assumes b != 0 and non-negative operands. */
20 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
/* zlib deflate defaults: window_size is log2 of the window in bytes. */
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
/* Dequeue retry policy: up to 10 retries, waiting 10000 us between them. */
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
28 * 30% extra size for compressed data compared to original data,
29 * in case data size cannot be reduced and it is actually bigger
30 * due to the compress block headers
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
/* Segment size used when building chained (SGL) mbufs. */
34 #define SMALL_SEG_SIZE 256
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
/* zlib windowBits of 31 (15 + 16) selects the gzip wrapper, whose trailer
 * carries a CRC-32 — see the GZIP_* strip logic in compress_zlib().
 */
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
/* Byte sizes of the zlib/gzip wrappers stripped from deflate output. */
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
/* 1-byte destination buffer, used to force an out-of-space condition. */
47 #define OUT_OF_SPACE_BUF 1
/* Big-data cases: segments capped at 64 KiB - 1 (mbuf data_len is 16-bit). */
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
/* Human-readable Huffman-mode names, indexed by enum rte_comp_huffman.
 * (Listing elided: the declarator line and closing brace are not shown.)
 */
55 huffman_type_strings[] = {
56 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
57 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
58 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Buffer-layout variants for src/dst mbufs: LB = linear, SGL = chained. */
69 LB_BOTH = 0, /* both input and output are linear*/
70 SGL_BOTH, /* both input and output are chained */
71 SGL_TO_LB, /* input buffer is chained */
72 LB_TO_SGL /* output buffer is chained */
/* Suite-wide resources created once in testsuite_setup() and released in
 * testsuite_teardown().
 */
79 struct comp_testsuite_params {
80 struct rte_mempool *large_mbuf_pool;
81 struct rte_mempool *small_mbuf_pool;
82 struct rte_mempool *big_mbuf_pool;
83 struct rte_mempool *op_pool;
84 struct rte_comp_xform *def_comp_xform;
85 struct rte_comp_xform *def_decomp_xform;
/* Per-test-case inputs: the plaintext buffers and the xforms to apply.
 * num_xforms xforms are cycled over num_bufs buffers (i % num_xforms).
 */
88 struct interim_data_params {
89 const char * const *test_bufs;
90 unsigned int num_bufs;
92 struct rte_comp_xform **compress_xforms;
93 struct rte_comp_xform **decompress_xforms;
94 unsigned int num_xforms;
/* Knobs that select how a test case is executed (buffer layout, which
 * stage runs through zlib vs. the PMD, stateful stepping, external mbufs).
 */
97 struct test_data_params {
98 enum rte_comp_op_type compress_state;
99 enum rte_comp_op_type decompress_state;
100 enum varied_buff buff_type;
101 enum zlib_direction zlib_dir;
102 unsigned int out_of_space;
103 unsigned int big_data;
104 /* stateful decompression specific parameters */
105 unsigned int decompress_output_block_size;
106 unsigned int decompress_steps_max;
107 /* external mbufs specific parameters */
108 unsigned int use_external_mbufs;
109 unsigned int inbuf_data_size;
110 const struct rte_memzone *inbuf_memzone;
111 const struct rte_memzone *compbuf_memzone;
112 const struct rte_memzone *uncompbuf_memzone;
/* Single global instance shared by all test cases; zero-initialized. */
115 static struct comp_testsuite_params testsuite_params = { 0 };
/* Suite-level cleanup: warn about any mbufs/ops still outstanding (a leak
 * in some test case), then free the mempools and the default xforms.
 * rte_mempool_free()/rte_free() accept NULL, so partial setup is safe here.
 */
118 testsuite_teardown(void)
120 struct comp_testsuite_params *ts_params = &testsuite_params;
122 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
123 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
124 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
125 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
126 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
127 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
128 if (rte_mempool_in_use_count(ts_params->op_pool))
129 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
131 rte_mempool_free(ts_params->large_mbuf_pool);
132 rte_mempool_free(ts_params->small_mbuf_pool);
133 rte_mempool_free(ts_params->big_mbuf_pool);
134 rte_mempool_free(ts_params->op_pool);
135 rte_free(ts_params->def_comp_xform);
136 rte_free(ts_params->def_decomp_xform);
/* Suite-level setup: requires at least one compressdev; sizes the large
 * mbuf pool from the longest test buffer (x1.3 for incompressible data),
 * creates small/big pools for SGL tests, the op pool, and the two default
 * DEFLATE xforms. On any failure the error path calls testsuite_teardown()
 * (elided lines), which tolerates partially-initialized state.
 */
140 testsuite_setup(void)
142 struct comp_testsuite_params *ts_params = &testsuite_params;
143 uint32_t max_buf_size = 0;
146 if (rte_compressdev_count() == 0) {
147 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
151 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
152 rte_compressdev_name_get(0));
/* Worst-case plaintext length (+1 for the NUL terminator, which the
 * tests deliberately include in the data).
 */
154 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
155 max_buf_size = RTE_MAX(max_buf_size,
156 strlen(compress_test_bufs[i]) + 1);
159 * Buffers to be used in compression and decompression.
160 * Since decompressed data might be larger than
161 * compressed data (due to block header),
162 * buffers should be big enough for both cases.
164 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
165 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
168 max_buf_size + RTE_PKTMBUF_HEADROOM,
170 if (ts_params->large_mbuf_pool == NULL) {
171 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
175 /* Create mempool with smaller buffers for SGL testing */
176 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
177 NUM_LARGE_MBUFS * MAX_SEGS,
179 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
181 if (ts_params->small_mbuf_pool == NULL) {
182 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
186 /* Create mempool with big buffers for SGL testing */
187 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
190 MAX_MBUF_SEGMENT_SIZE,
192 if (ts_params->big_mbuf_pool == NULL) {
193 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* Op pool reserves room for priv_op_data after each op (orig_idx). */
197 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
198 0, sizeof(struct priv_op_data),
200 if (ts_params->op_pool == NULL) {
201 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
205 ts_params->def_comp_xform =
206 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
207 if (ts_params->def_comp_xform == NULL) {
209 "Default compress xform could not be created\n");
212 ts_params->def_decomp_xform =
213 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
214 if (ts_params->def_decomp_xform == NULL) {
216 "Default decompress xform could not be created\n");
220 /* Initializes default values for compress/decompress xforms */
221 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing ',' (comma operator) below where ';' was surely
 * intended — harmless (the next statement becomes the right operand) but
 * should be a semicolon. Same at original line 230.
 */
222 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
223 ts_params->def_comp_xform->compress.deflate.huffman =
224 RTE_COMP_HUFFMAN_DEFAULT;
225 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
226 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
227 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
229 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
230 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
231 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
232 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path (elided label): undo whatever was created above. */
237 testsuite_teardown();
/* Per-test setup: configure device 0 with one queue pair, set up the
 * queue pair with NUM_MAX_INFLIGHT_OPS descriptors, and start the device.
 */
243 generic_ut_setup(void)
245 /* Configure compressdev (one device, one queue pair) */
246 struct rte_compressdev_config config = {
247 .socket_id = rte_socket_id(),
249 .max_nb_priv_xforms = NUM_MAX_XFORMS,
253 if (rte_compressdev_configure(0, &config) < 0) {
254 RTE_LOG(ERR, USER1, "Device configuration failed\n");
258 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
259 rte_socket_id()) < 0) {
260 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
264 if (rte_compressdev_start(0) < 0) {
265 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0 (mirror of generic_ut_setup). */
273 generic_ut_teardown(void)
275 rte_compressdev_stop(0);
276 if (rte_compressdev_close(0) < 0)
277 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/* Negative test: rte_compressdev_configure()/queue_pair_setup() must
 * reject a zero queue-pair count, a count above the device's advertised
 * maximum (when one is advertised), and queue-pair setup before any
 * queue pairs were configured.
 */
281 test_compressdev_invalid_configuration(void)
283 struct rte_compressdev_config invalid_config;
284 struct rte_compressdev_config valid_config = {
285 .socket_id = rte_socket_id(),
287 .max_nb_priv_xforms = NUM_MAX_XFORMS,
290 struct rte_compressdev_info dev_info;
292 /* Invalid configuration with 0 queue pairs */
293 memcpy(&invalid_config, &valid_config,
294 sizeof(struct rte_compressdev_config));
295 invalid_config.nb_queue_pairs = 0;
297 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
298 "Device configuration was successful "
299 "with no queue pairs (invalid)\n");
302 * Invalid configuration with too many queue pairs
303 * (if there is an actual maximum number of queue pairs)
305 rte_compressdev_info_get(0, &dev_info);
/* max_nb_queue_pairs == 0 means "no limit", so skip that sub-case. */
306 if (dev_info.max_nb_queue_pairs != 0) {
307 memcpy(&invalid_config, &valid_config,
308 sizeof(struct rte_compressdev_config));
309 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
311 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
312 "Device configuration was successful "
313 "with too many queue pairs (invalid)\n");
316 /* Invalid queue pair setup, with no number of queue pairs set */
317 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
318 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
319 "Queue pair setup was successful "
320 "with no queue pairs set (invalid)\n");
/* Byte-wise comparison helper: lengths must match exactly, then contents.
 * Logs and returns failure (elided return lines) on any mismatch.
 */
326 compare_buffers(const char *buffer1, uint32_t buffer1_len,
327 const char *buffer2, uint32_t buffer2_len)
329 if (buffer1_len != buffer2_len) {
330 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
334 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
335 RTE_LOG(ERR, USER1, "Buffers are different\n");
343 * Maps compressdev and Zlib flush flags
/* Translate rte_comp_flush_flag to the corresponding zlib flush constant
 * (Z_NO_FLUSH / Z_SYNC_FLUSH / Z_FULL_FLUSH / Z_FINISH — return lines
 * elided in this listing). The default arm is unreachable by contract.
 */
346 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
349 case RTE_COMP_FLUSH_NONE:
351 case RTE_COMP_FLUSH_SYNC:
353 case RTE_COMP_FLUSH_FULL:
355 case RTE_COMP_FLUSH_FINAL:
358 * There should be only the values above,
359 * so this should never happen
/* Reference compressor: run op's source data through zlib deflate so the
 * result can be validated against (or fed to) the PMD. SGL inputs/outputs
 * are bounced through temporary linear buffers. On success fills in
 * op->consumed/produced/status/output_chksum; when a checksum was
 * requested, the zlib/gzip wrapper is stripped from the mbuf so only raw
 * deflate data plus the checksum accounting remains.
 */
367 compress_zlib(struct rte_comp_op *op,
368 const struct rte_comp_xform *xform, int mem_level)
372 int strategy, window_bits, comp_level;
373 int ret = TEST_FAILED;
374 uint8_t *single_src_buf = NULL;
375 uint8_t *single_dst_buf = NULL;
377 /* initialize zlib stream */
378 stream.zalloc = Z_NULL;
379 stream.zfree = Z_NULL;
380 stream.opaque = Z_NULL;
/* Fixed Huffman maps to Z_FIXED (elided line); otherwise default. */
382 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
385 strategy = Z_DEFAULT_STRATEGY;
388 * Window bits is the base two logarithm of the window size (in bytes).
389 * When doing raw DEFLATE, this number will be negative.
391 window_bits = -(xform->compress.window_size)
392 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
/* ADLER32 -> zlib wrapper (elided assignment); CRC32 -> gzip wrapper. */
394 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
395 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
397 comp_level = xform->compress.level;
399 if (comp_level != RTE_COMP_LEVEL_NONE)
400 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
401 window_bits, mem_level, strategy);
403 ret = deflateInit(&stream, Z_NO_COMPRESSION);
406 printf("Zlib deflate could not be initialized\n");
410 /* Assuming stateless operation */
/* Chained source: flatten into a single contiguous buffer for zlib. */
412 if (op->m_src->nb_segs > 1) {
413 single_src_buf = rte_malloc(NULL,
414 rte_pktmbuf_pkt_len(op->m_src), 0);
415 if (single_src_buf == NULL) {
416 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
420 if (rte_pktmbuf_read(op->m_src, op->src.offset,
421 rte_pktmbuf_pkt_len(op->m_src) -
423 single_src_buf) == NULL) {
425 "Buffer could not be read entirely\n");
429 stream.avail_in = op->src.length;
430 stream.next_in = single_src_buf;
433 stream.avail_in = op->src.length;
434 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: deflate into a bounce buffer, scatter later. */
438 if (op->m_dst->nb_segs > 1) {
440 single_dst_buf = rte_malloc(NULL,
441 rte_pktmbuf_pkt_len(op->m_dst), 0);
442 if (single_dst_buf == NULL) {
444 "Buffer could not be allocated\n");
448 stream.avail_out = op->m_dst->pkt_len;
449 stream.next_out = single_dst_buf;
451 } else {/* linear output */
452 stream.avail_out = op->m_dst->data_len;
453 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
457 /* Stateless operation, all buffer will be compressed in one go */
458 zlib_flush = map_zlib_flush_flag(op->flush_flag);
459 ret = deflate(&stream, zlib_flush);
461 if (stream.avail_in != 0) {
462 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* With Z_FINISH, anything but Z_STREAM_END means incomplete output. */
466 if (ret != Z_STREAM_END)
469 /* Copy data to destination SGL */
470 if (op->m_dst->nb_segs > 1) {
471 uint32_t remaining_data = stream.total_out;
472 uint8_t *src_data = single_dst_buf;
473 struct rte_mbuf *dst_buf = op->m_dst;
475 while (remaining_data > 0) {
476 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
477 uint8_t *, op->dst.offset);
/* Last partial segment (elided break), else full segment and advance. */
479 if (remaining_data < dst_buf->data_len) {
480 memcpy(dst_data, src_data, remaining_data);
483 memcpy(dst_data, src_data, dst_buf->data_len);
484 remaining_data -= dst_buf->data_len;
485 src_data += dst_buf->data_len;
486 dst_buf = dst_buf->next;
491 op->consumed = stream.total_in;
/* Strip the zlib/gzip framing so op->m_dst holds raw deflate data;
 * produced excludes header+trailer bytes.
 */
492 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
493 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
494 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
495 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
497 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
498 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
499 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
500 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
503 op->produced = stream.total_out;
505 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* stream.adler holds adler32 or crc32 depending on the wrapper chosen. */
506 op->output_chksum = stream.adler;
508 deflateReset(&stream);
/* Cleanup path: rte_free(NULL) is a no-op, safe for untaken branches. */
513 rte_free(single_src_buf);
514 rte_free(single_dst_buf);
/* Reference decompressor: inflate op's source data with zlib to validate
 * (or feed) the PMD path. A chained *source* mbuf causes both src and dst
 * to be bounced through temporary linear buffers.
 * NOTE(review): when only the destination is chained (LB_TO_SGL), the
 * linear branch limits avail_out to the first segment's data_len — verify
 * this is sufficient for the LB_TO_SGL test cases.
 */
520 decompress_zlib(struct rte_comp_op *op,
521 const struct rte_comp_xform *xform)
526 int ret = TEST_FAILED;
527 uint8_t *single_src_buf = NULL;
528 uint8_t *single_dst_buf = NULL;
530 /* initialize zlib stream */
531 stream.zalloc = Z_NULL;
532 stream.zfree = Z_NULL;
533 stream.opaque = Z_NULL;
536 * Window bits is the base two logarithm of the window size (in bytes).
537 * When doing raw DEFLATE, this number will be negative.
539 window_bits = -(xform->decompress.window_size);
540 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate path. */
543 printf("Zlib deflate could not be initialized\n");
547 /* Assuming stateless operation */
/* Chained source: flatten src and stage dst in linear bounce buffers. */
549 if (op->m_src->nb_segs > 1) {
550 single_src_buf = rte_malloc(NULL,
551 rte_pktmbuf_pkt_len(op->m_src), 0);
552 if (single_src_buf == NULL) {
553 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
556 single_dst_buf = rte_malloc(NULL,
557 rte_pktmbuf_pkt_len(op->m_dst), 0);
558 if (single_dst_buf == NULL) {
559 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
562 if (rte_pktmbuf_read(op->m_src, 0,
563 rte_pktmbuf_pkt_len(op->m_src),
564 single_src_buf) == NULL) {
566 "Buffer could not be read entirely\n");
570 stream.avail_in = op->src.length;
571 stream.next_in = single_src_buf;
572 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
573 stream.next_out = single_dst_buf;
576 stream.avail_in = op->src.length;
577 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
578 stream.avail_out = op->m_dst->data_len;
579 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
582 /* Stateless operation, all buffer will be compressed in one go */
583 zlib_flush = map_zlib_flush_flag(op->flush_flag);
584 ret = inflate(&stream, zlib_flush);
586 if (stream.avail_in != 0) {
587 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
591 if (ret != Z_STREAM_END)
/* Scatter the bounce buffer back into the dst chain. The guard tests
 * m_src->nb_segs to mirror the allocation branch above (bounce buffers
 * exist only when the source was chained).
 */
594 if (op->m_src->nb_segs > 1) {
595 uint32_t remaining_data = stream.total_out;
596 uint8_t *src_data = single_dst_buf;
597 struct rte_mbuf *dst_buf = op->m_dst;
599 while (remaining_data > 0) {
600 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
603 if (remaining_data < dst_buf->data_len) {
604 memcpy(dst_data, src_data, remaining_data);
607 memcpy(dst_data, src_data, dst_buf->data_len);
608 remaining_data -= dst_buf->data_len;
609 src_data += dst_buf->data_len;
610 dst_buf = dst_buf->next;
615 op->consumed = stream.total_in;
616 op->produced = stream.total_out;
617 op->status = RTE_COMP_OP_STATUS_SUCCESS;
619 inflateReset(&stream);
/* Build a chained (SGL) mbuf of total_data_size bytes rooted at head_buf:
 * seg_size bytes per segment, optionally copying test_buf's contents
 * (test_buf == NULL just reserves space for output). If limit_segs_in_sgl
 * is non-zero the chain is capped at that many segments, with the final
 * (possibly oversized) remainder drawn from large_mbuf_pool.
 */
629 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
630 uint32_t total_data_size,
631 struct rte_mempool *small_mbuf_pool,
632 struct rte_mempool *large_mbuf_pool,
633 uint8_t limit_segs_in_sgl,
636 uint32_t remaining_data = total_data_size;
637 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
638 struct rte_mempool *pool;
639 struct rte_mbuf *next_seg;
642 const char *data_ptr = test_buf;
/* Cap segment count; "- 1" accounts for head_buf being the first seg. */
646 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
647 num_remaining_segs = limit_segs_in_sgl - 1;
650 * Allocate data in the first segment (header) and
651 * copy data if test buffer is provided
653 if (remaining_data < seg_size)
654 data_size = remaining_data;
656 data_size = seg_size;
657 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
658 if (buf_ptr == NULL) {
660 "Not enough space in the 1st buffer\n");
664 if (data_ptr != NULL) {
665 /* Copy characters without NULL terminator */
666 strncpy(buf_ptr, data_ptr, data_size);
667 data_ptr += data_size;
669 remaining_data -= data_size;
670 num_remaining_segs--;
673 * Allocate the rest of the segments,
674 * copy the rest of the data and chain the segments.
676 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment takes all remaining data; if that exceeds seg_size
 * (segment cap in effect), use the large pool.
 */
678 if (i == (num_remaining_segs - 1)) {
680 if (remaining_data > seg_size)
681 pool = large_mbuf_pool;
683 pool = small_mbuf_pool;
684 data_size = remaining_data;
686 data_size = seg_size;
687 pool = small_mbuf_pool;
690 next_seg = rte_pktmbuf_alloc(pool);
691 if (next_seg == NULL) {
693 "New segment could not be allocated "
694 "from the mempool\n");
697 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
698 if (buf_ptr == NULL) {
700 "Not enough space in the buffer\n");
701 rte_pktmbuf_free(next_seg);
704 if (data_ptr != NULL) {
705 /* Copy characters without NULL terminator */
706 strncpy(buf_ptr, data_ptr, data_size);
707 data_ptr += data_size;
709 remaining_data -= data_size;
711 ret = rte_pktmbuf_chain(head_buf, next_seg);
713 rte_pktmbuf_free(next_seg);
/* NOTE(review): runtime string has a typo ("could not chained") —
 * left untouched here; fixable only in a code-changing edit.
 */
715 "Segment could not chained\n");
/* No-op free callback for rte_pktmbuf_attach_extbuf(): the external
 * buffers come from memzones owned by the caller, so nothing to release.
 */
724 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
729 * Compresses and decompresses buffer with compressdev API and Zlib API
732 test_deflate_comp_decomp(const struct interim_data_params *int_data,
733 const struct test_data_params *test_data)
735 struct comp_testsuite_params *ts_params = &testsuite_params;
736 const char * const *test_bufs = int_data->test_bufs;
737 unsigned int num_bufs = int_data->num_bufs;
738 uint16_t *buf_idx = int_data->buf_idx;
739 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
740 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
741 unsigned int num_xforms = int_data->num_xforms;
742 enum rte_comp_op_type compress_state = test_data->compress_state;
743 enum rte_comp_op_type decompress_state = test_data->decompress_state;
744 unsigned int buff_type = test_data->buff_type;
745 unsigned int out_of_space = test_data->out_of_space;
746 unsigned int big_data = test_data->big_data;
747 enum zlib_direction zlib_dir = test_data->zlib_dir;
748 int ret_status = TEST_FAILED;
749 struct rte_mbuf_ext_shared_info inbuf_info;
750 struct rte_mbuf_ext_shared_info compbuf_info;
751 struct rte_mbuf_ext_shared_info decompbuf_info;
753 struct rte_mbuf *uncomp_bufs[num_bufs];
754 struct rte_mbuf *comp_bufs[num_bufs];
755 struct rte_comp_op *ops[num_bufs];
756 struct rte_comp_op *ops_processed[num_bufs];
757 void *priv_xforms[num_bufs];
758 uint16_t num_enqd, num_deqd, num_total_deqd;
759 uint16_t num_priv_xforms = 0;
760 unsigned int deqd_retries = 0;
761 struct priv_op_data *priv_data;
764 struct rte_mempool *buf_pool;
766 /* Compressing with CompressDev */
767 unsigned int oos_zlib_decompress =
768 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
769 /* Decompressing with CompressDev */
770 unsigned int oos_zlib_compress =
771 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
772 const struct rte_compressdev_capabilities *capa =
773 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
774 char *contig_buf = NULL;
775 uint64_t compress_checksum[num_bufs];
777 char *all_decomp_data = NULL;
778 unsigned int decomp_produced_data_size = 0;
779 unsigned int step = 0;
781 TEST_ASSERT(decompress_state == RTE_COMP_OP_STATELESS || num_bufs == 1,
782 "Number of stateful operations in a step should be 1");
786 "Compress device does not support DEFLATE\n");
790 /* Initialize all arrays to NULL */
791 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
792 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
793 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
794 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
795 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
797 if (decompress_state == RTE_COMP_OP_STATEFUL) {
798 data_size = strlen(test_bufs[0]) + 1;
799 all_decomp_data = rte_malloc(NULL, data_size,
800 RTE_CACHE_LINE_SIZE);
804 buf_pool = ts_params->big_mbuf_pool;
805 else if (buff_type == SGL_BOTH)
806 buf_pool = ts_params->small_mbuf_pool;
808 buf_pool = ts_params->large_mbuf_pool;
810 /* Prepare the source mbufs with the data */
811 ret = rte_pktmbuf_alloc_bulk(buf_pool,
812 uncomp_bufs, num_bufs);
815 "Source mbufs could not be allocated "
816 "from the mempool\n");
820 if (test_data->use_external_mbufs) {
821 inbuf_info.free_cb = extbuf_free_callback;
822 inbuf_info.fcb_opaque = NULL;
823 rte_mbuf_ext_refcnt_set(&inbuf_info, 1);
824 for (i = 0; i < num_bufs; i++) {
825 rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
826 test_data->inbuf_memzone->addr,
827 test_data->inbuf_memzone->iova,
828 test_data->inbuf_data_size,
830 rte_pktmbuf_append(uncomp_bufs[i],
831 test_data->inbuf_data_size);
833 } else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
834 for (i = 0; i < num_bufs; i++) {
835 data_size = strlen(test_bufs[i]) + 1;
836 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
838 big_data ? buf_pool : ts_params->small_mbuf_pool,
839 big_data ? buf_pool : ts_params->large_mbuf_pool,
840 big_data ? 0 : MAX_SEGS,
841 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
845 for (i = 0; i < num_bufs; i++) {
846 data_size = strlen(test_bufs[i]) + 1;
847 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
848 strlcpy(buf_ptr, test_bufs[i], data_size);
852 /* Prepare the destination mbufs */
853 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
856 "Destination mbufs could not be allocated "
857 "from the mempool\n");
861 if (test_data->use_external_mbufs) {
862 compbuf_info.free_cb = extbuf_free_callback;
863 compbuf_info.fcb_opaque = NULL;
864 rte_mbuf_ext_refcnt_set(&compbuf_info, 1);
865 for (i = 0; i < num_bufs; i++) {
866 rte_pktmbuf_attach_extbuf(comp_bufs[i],
867 test_data->compbuf_memzone->addr,
868 test_data->compbuf_memzone->iova,
869 test_data->compbuf_memzone->len,
871 rte_pktmbuf_append(comp_bufs[i],
872 test_data->compbuf_memzone->len);
874 } else if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
875 for (i = 0; i < num_bufs; i++) {
876 if (out_of_space == 1 && oos_zlib_decompress)
877 data_size = OUT_OF_SPACE_BUF;
879 (data_size = strlen(test_bufs[i]) *
880 COMPRESS_BUF_SIZE_RATIO);
882 if (prepare_sgl_bufs(NULL, comp_bufs[i],
884 big_data ? buf_pool : ts_params->small_mbuf_pool,
885 big_data ? buf_pool : ts_params->large_mbuf_pool,
886 big_data ? 0 : MAX_SEGS,
887 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
893 for (i = 0; i < num_bufs; i++) {
894 if (out_of_space == 1 && oos_zlib_decompress)
895 data_size = OUT_OF_SPACE_BUF;
897 (data_size = strlen(test_bufs[i]) *
898 COMPRESS_BUF_SIZE_RATIO);
900 rte_pktmbuf_append(comp_bufs[i], data_size);
904 /* Build the compression operations */
905 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
908 "Compress operations could not be allocated "
909 "from the mempool\n");
914 for (i = 0; i < num_bufs; i++) {
915 ops[i]->m_src = uncomp_bufs[i];
916 ops[i]->m_dst = comp_bufs[i];
917 ops[i]->src.offset = 0;
918 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
919 ops[i]->dst.offset = 0;
920 if (compress_state == RTE_COMP_OP_STATELESS)
921 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
924 "Stateful operations are not supported "
925 "in these tests yet\n");
928 ops[i]->input_chksum = 0;
930 * Store original operation index in private data,
931 * since ordering does not have to be maintained,
932 * when dequeueing from compressdev, so a comparison
933 * at the end of the test can be done.
935 priv_data = (struct priv_op_data *) (ops[i] + 1);
936 priv_data->orig_idx = i;
939 /* Compress data (either with Zlib API or compressdev API */
940 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
941 for (i = 0; i < num_bufs; i++) {
942 const struct rte_comp_xform *compress_xform =
943 compress_xforms[i % num_xforms];
944 ret = compress_zlib(ops[i], compress_xform,
949 ops_processed[i] = ops[i];
952 /* Create compress private xform data */
953 for (i = 0; i < num_xforms; i++) {
954 ret = rte_compressdev_private_xform_create(0,
955 (const struct rte_comp_xform *)compress_xforms[i],
959 "Compression private xform "
960 "could not be created\n");
966 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
967 /* Attach shareable private xform data to ops */
968 for (i = 0; i < num_bufs; i++)
969 ops[i]->private_xform = priv_xforms[i % num_xforms];
971 /* Create rest of the private xforms for the other ops */
972 for (i = num_xforms; i < num_bufs; i++) {
973 ret = rte_compressdev_private_xform_create(0,
974 compress_xforms[i % num_xforms],
978 "Compression private xform "
979 "could not be created\n");
985 /* Attach non shareable private xform data to ops */
986 for (i = 0; i < num_bufs; i++)
987 ops[i]->private_xform = priv_xforms[i];
990 /* Enqueue and dequeue all operations */
991 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
992 if (num_enqd < num_bufs) {
994 "The operations could not be enqueued\n");
1001 * If retrying a dequeue call, wait for 10 ms to allow
1002 * enough time to the driver to process the operations
1004 if (deqd_retries != 0) {
1006 * Avoid infinite loop if not all the
1007 * operations get out of the device
1009 if (deqd_retries == MAX_DEQD_RETRIES) {
1011 "Not all operations could be "
1015 usleep(DEQUEUE_WAIT_TIME);
1017 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1018 &ops_processed[num_total_deqd], num_bufs);
1019 num_total_deqd += num_deqd;
1022 } while (num_total_deqd < num_enqd);
1026 /* Free compress private xforms */
1027 for (i = 0; i < num_priv_xforms; i++) {
1028 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1029 priv_xforms[i] = NULL;
1031 num_priv_xforms = 0;
1034 for (i = 0; i < num_bufs; i++) {
1035 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1036 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1037 const struct rte_comp_compress_xform *compress_xform =
1038 &compress_xforms[xform_idx]->compress;
1039 enum rte_comp_huffman huffman_type =
1040 compress_xform->deflate.huffman;
1041 char engine[] = "zlib (directly, not PMD)";
1042 if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
1043 strlcpy(engine, "PMD", sizeof(engine));
1045 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
1046 " %u bytes (level = %d, huffman = %s)\n",
1047 buf_idx[priv_data->orig_idx], engine,
1048 ops_processed[i]->consumed, ops_processed[i]->produced,
1049 compress_xform->level,
1050 huffman_type_strings[huffman_type]);
1051 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
1052 ops_processed[i]->consumed == 0 ? 0 :
1053 (float)ops_processed[i]->produced /
1054 ops_processed[i]->consumed * 100);
1055 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
1056 compress_checksum[i] = ops_processed[i]->output_chksum;
1061 * Check operation status and free source mbufs (destination mbuf and
1062 * compress operation information is needed for the decompression stage)
1064 for (i = 0; i < num_bufs; i++) {
1065 if (out_of_space && oos_zlib_decompress) {
1066 if (ops_processed[i]->status !=
1067 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1068 ret_status = TEST_FAILED;
1070 "Operation without expected out of "
1071 "space status error\n");
1077 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1079 "Some operations were not successful\n");
1082 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1083 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1084 uncomp_bufs[priv_data->orig_idx] = NULL;
1087 if (out_of_space && oos_zlib_decompress) {
1088 ret_status = TEST_SUCCESS;
1092 /* Allocate buffers for decompressed data */
1093 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1096 "Destination mbufs could not be allocated "
1097 "from the mempool\n");
1101 if (test_data->use_external_mbufs) {
1102 decompbuf_info.free_cb = extbuf_free_callback;
1103 decompbuf_info.fcb_opaque = NULL;
1104 rte_mbuf_ext_refcnt_set(&decompbuf_info, 1);
1105 for (i = 0; i < num_bufs; i++) {
1106 rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
1107 test_data->uncompbuf_memzone->addr,
1108 test_data->uncompbuf_memzone->iova,
1109 test_data->uncompbuf_memzone->len,
1111 rte_pktmbuf_append(uncomp_bufs[i],
1112 test_data->uncompbuf_memzone->len);
1114 } else if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1115 for (i = 0; i < num_bufs; i++) {
1116 priv_data = (struct priv_op_data *)
1117 (ops_processed[i] + 1);
1118 if (out_of_space == 1 && oos_zlib_compress)
1119 data_size = OUT_OF_SPACE_BUF;
1120 else if (test_data->decompress_output_block_size != 0)
1122 test_data->decompress_output_block_size;
1125 strlen(test_bufs[priv_data->orig_idx]) + 1;
1127 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1129 big_data ? buf_pool : ts_params->small_mbuf_pool,
1130 big_data ? buf_pool : ts_params->large_mbuf_pool,
1131 big_data ? 0 : MAX_SEGS,
1132 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1138 for (i = 0; i < num_bufs; i++) {
1139 priv_data = (struct priv_op_data *)
1140 (ops_processed[i] + 1);
1141 if (out_of_space == 1 && oos_zlib_compress)
1142 data_size = OUT_OF_SPACE_BUF;
1143 else if (test_data->decompress_output_block_size != 0)
1145 test_data->decompress_output_block_size;
1148 strlen(test_bufs[priv_data->orig_idx]) + 1;
1150 rte_pktmbuf_append(uncomp_bufs[i], data_size);
1154 /* Build the decompression operations */
1155 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1158 "Decompress operations could not be allocated "
1159 "from the mempool\n");
1163 /* Source buffer is the compressed data from the previous operations */
1164 for (i = 0; i < num_bufs; i++) {
1165 ops[i]->m_src = ops_processed[i]->m_dst;
1166 ops[i]->m_dst = uncomp_bufs[i];
1167 ops[i]->src.offset = 0;
1169 * Set the length of the compressed data to the
1170 * number of bytes that were produced in the previous stage
1172 ops[i]->src.length = ops_processed[i]->produced;
1174 ops[i]->dst.offset = 0;
1175 if (decompress_state == RTE_COMP_OP_STATELESS) {
1176 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1177 ops[i]->op_type = RTE_COMP_OP_STATELESS;
1178 } else if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_NONE) {
1179 ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
1180 ops[i]->op_type = RTE_COMP_OP_STATEFUL;
1183 "Stateful operations are not supported "
1184 "in these tests yet\n");
1187 ops[i]->input_chksum = 0;
1189 * Copy private data from previous operations,
1190 * to keep the pointer to the original buffer
1192 memcpy(ops[i] + 1, ops_processed[i] + 1,
1193 sizeof(struct priv_op_data));
1197 * Free the previous compress operations,
1198 * as they are not needed anymore
1200 rte_comp_op_bulk_free(ops_processed, num_bufs);
1202 /* Decompress data (either with Zlib API or compressdev API */
1203 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1204 for (i = 0; i < num_bufs; i++) {
1205 priv_data = (struct priv_op_data *)(ops[i] + 1);
1206 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1207 const struct rte_comp_xform *decompress_xform =
1208 decompress_xforms[xform_idx];
1210 ret = decompress_zlib(ops[i], decompress_xform);
1214 ops_processed[i] = ops[i];
1217 if (decompress_state == RTE_COMP_OP_STATELESS) {
1218 /* Create decompress private xform data */
1219 for (i = 0; i < num_xforms; i++) {
1220 ret = rte_compressdev_private_xform_create(0,
1221 (const struct rte_comp_xform *)
1222 decompress_xforms[i],
1226 "Decompression private xform "
1227 "could not be created\n");
1233 if (capa->comp_feature_flags &
1234 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1235 /* Attach shareable private xform data to ops */
1236 for (i = 0; i < num_bufs; i++) {
1237 priv_data = (struct priv_op_data *)
1239 uint16_t xform_idx =
1240 priv_data->orig_idx % num_xforms;
1241 ops[i]->private_xform =
1242 priv_xforms[xform_idx];
1245 /* Create rest of the private xforms */
1246 /* for the other ops */
1247 for (i = num_xforms; i < num_bufs; i++) {
1249 rte_compressdev_private_xform_create(0,
1250 decompress_xforms[i % num_xforms],
1254 "Decompression private xform could not be created\n");
1260 /* Attach non shareable private xform data */
1262 for (i = 0; i < num_bufs; i++) {
1263 priv_data = (struct priv_op_data *)
1265 uint16_t xform_idx =
1266 priv_data->orig_idx;
1267 ops[i]->private_xform =
1268 priv_xforms[xform_idx];
1272 /* Create a stream object for stateful decompression */
1273 ret = rte_compressdev_stream_create(0,
1274 decompress_xforms[0], &stream);
1277 "Decompression stream could not be created, error %d\n",
1281 /* Attach stream to ops */
1282 for (i = 0; i < num_bufs; i++)
1283 ops[i]->stream = stream;
1287 /* Enqueue and dequeue all operations */
1288 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1289 if (num_enqd < num_bufs) {
1291 "The operations could not be enqueued\n");
1298 * If retrying a dequeue call, wait for 10 ms to allow
1299 * enough time to the driver to process the operations
1301 if (deqd_retries != 0) {
1303 * Avoid infinite loop if not all the
1304 * operations get out of the device
1306 if (deqd_retries == MAX_DEQD_RETRIES) {
1308 "Not all operations could be "
1312 usleep(DEQUEUE_WAIT_TIME);
1314 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1315 &ops_processed[num_total_deqd], num_bufs);
1316 num_total_deqd += num_deqd;
1318 } while (num_total_deqd < num_enqd);
1323 for (i = 0; i < num_bufs; i++) {
1324 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1325 char engine[] = "zlib, (directly, no PMD)";
1326 if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1327 strlcpy(engine, "pmd", sizeof(engine));
1328 RTE_LOG(DEBUG, USER1,
1329 "Buffer %u decompressed by %s from %u to %u bytes\n",
1330 buf_idx[priv_data->orig_idx], engine,
1331 ops_processed[i]->consumed, ops_processed[i]->produced);
1336 * Check operation status and free source mbuf (destination mbuf and
1337 * compress operation information is still needed)
1339 for (i = 0; i < num_bufs; i++) {
1340 if (out_of_space && oos_zlib_compress) {
1341 if (ops_processed[i]->status !=
1342 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1343 ret_status = TEST_FAILED;
1345 "Operation without expected out of "
1346 "space status error\n");
1352 if (decompress_state == RTE_COMP_OP_STATEFUL
1353 && (ops_processed[i]->status ==
1354 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
1355 || ops_processed[i]->status ==
1356 RTE_COMP_OP_STATUS_SUCCESS)) {
1357 /* collect the output into all_decomp_data */
1358 const void *ptr = rte_pktmbuf_read(
1359 ops_processed[i]->m_dst,
1360 ops_processed[i]->dst.offset,
1361 ops_processed[i]->produced,
1363 decomp_produced_data_size);
1364 if (ptr != all_decomp_data + decomp_produced_data_size)
1365 rte_memcpy(all_decomp_data +
1366 decomp_produced_data_size,
1367 ptr, ops_processed[i]->produced);
1368 decomp_produced_data_size += ops_processed[i]->produced;
1369 if (ops_processed[i]->src.length >
1370 ops_processed[i]->consumed) {
1371 if (ops_processed[i]->status ==
1372 RTE_COMP_OP_STATUS_SUCCESS) {
1375 "Operation finished too early\n");
1379 if (step >= test_data->decompress_steps_max) {
1382 "Operation exceeded maximum steps\n");
1385 ops[i] = ops_processed[i];
1387 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1388 ops[i]->src.offset +=
1389 ops_processed[i]->consumed;
1390 ops[i]->src.length -=
1391 ops_processed[i]->consumed;
1394 /* Compare the original stream with the */
1395 /* decompressed stream (in size and the data) */
1396 priv_data = (struct priv_op_data *)
1397 (ops_processed[i] + 1);
1399 test_bufs[priv_data->orig_idx];
1400 const char *buf2 = all_decomp_data;
1402 if (compare_buffers(buf1, strlen(buf1) + 1,
1403 buf2, decomp_produced_data_size) < 0)
1405 /* Test checksums */
1406 if (compress_xforms[0]->compress.chksum
1407 != RTE_COMP_CHECKSUM_NONE) {
1408 if (ops_processed[i]->output_chksum
1409 != compress_checksum[i]) {
1411 "The checksums differ\n"
1412 "Compression Checksum: %" PRIu64 "\tDecompression "
1413 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1414 ops_processed[i]->output_chksum);
1419 } else if (ops_processed[i]->status !=
1420 RTE_COMP_OP_STATUS_SUCCESS) {
1422 "Some operations were not successful\n");
1425 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1426 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1427 comp_bufs[priv_data->orig_idx] = NULL;
1430 if ((out_of_space && oos_zlib_compress)
1431 || (decompress_state == RTE_COMP_OP_STATEFUL)) {
1432 ret_status = TEST_SUCCESS;
1437 * Compare the original stream with the decompressed stream
1438 * (in size and the data)
1440 for (i = 0; i < num_bufs; i++) {
1441 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1442 const char *buf1 = test_data->use_external_mbufs ?
1443 test_data->inbuf_memzone->addr :
1444 test_bufs[priv_data->orig_idx];
1446 data_size = test_data->use_external_mbufs ?
1447 test_data->inbuf_data_size :
1449 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1450 if (contig_buf == NULL) {
1451 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1456 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1457 ops_processed[i]->produced, contig_buf);
1458 if (compare_buffers(buf1, data_size,
1459 buf2, ops_processed[i]->produced) < 0)
1462 /* Test checksums */
1463 if (compress_xforms[0]->compress.chksum !=
1464 RTE_COMP_CHECKSUM_NONE) {
1465 if (ops_processed[i]->output_chksum !=
1466 compress_checksum[i]) {
1467 RTE_LOG(ERR, USER1, "The checksums differ\n"
1468 "Compression Checksum: %" PRIu64 "\tDecompression "
1469 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1470 ops_processed[i]->output_chksum);
1475 rte_free(contig_buf);
1479 ret_status = TEST_SUCCESS;
1482 /* Free resources */
1483 for (i = 0; i < num_bufs; i++) {
1484 rte_pktmbuf_free(uncomp_bufs[i]);
1485 rte_pktmbuf_free(comp_bufs[i]);
1486 rte_comp_op_free(ops[i]);
1487 rte_comp_op_free(ops_processed[i]);
1489 for (i = 0; i < num_priv_xforms; i++)
1490 if (priv_xforms[i] != NULL)
1491 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1493 rte_compressdev_stream_free(0, stream);
1494 if (all_decomp_data != NULL)
1495 rte_free(all_decomp_data);
1496 rte_free(contig_buf);
/*
 * Verify stateless DEFLATE with fixed Huffman encoding.
 * For every buffer in compress_test_bufs, data is compressed by the PMD
 * and decompressed with zlib, then compressed with zlib and decompressed
 * by the PMD. Skipped when the device lacks RTE_COMP_FF_HUFFMAN_FIXED.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error/exit paths); visible code is kept unchanged.
 */
test_compressdev_deflate_stateless_fixed(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	const struct rte_compressdev_capabilities *capab;

	/* Query DEFLATE capabilities of device 0 */
	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	/* Skip test when fixed Huffman is not supported by the PMD */
	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)

	struct rte_comp_xform *compress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);

	if (compress_xform == NULL) {
			"Compress xform could not be created\n");

	/* Start from the default compress xform, force fixed Huffman */
	memcpy(compress_xform, ts_params->def_comp_xform,
			sizeof(struct rte_comp_xform));
	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;

	struct interim_data_params int_data = {
		&ts_params->def_decomp_xform,

	/* Linear (non-chained) mbufs, stateless in both directions */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,

	/* Run each test buffer through both PMD/zlib directions */
	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
		int_data.test_bufs = &compress_test_bufs[i];
		int_data.buf_idx = &i;

		/* Compress with compressdev, decompress with Zlib */
		test_data.zlib_dir = ZLIB_DECOMPRESS;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

		/* Compress with Zlib, decompress with compressdev */
		test_data.zlib_dir = ZLIB_COMPRESS;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

	rte_free(compress_xform);
1572 test_compressdev_deflate_stateless_dynamic(void)
1574 struct comp_testsuite_params *ts_params = &testsuite_params;
1577 struct rte_comp_xform *compress_xform =
1578 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1580 const struct rte_compressdev_capabilities *capab;
1582 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1583 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1585 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1588 if (compress_xform == NULL) {
1590 "Compress xform could not be created\n");
1595 memcpy(compress_xform, ts_params->def_comp_xform,
1596 sizeof(struct rte_comp_xform));
1597 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1599 struct interim_data_params int_data = {
1604 &ts_params->def_decomp_xform,
1608 struct test_data_params test_data = {
1609 .compress_state = RTE_COMP_OP_STATELESS,
1610 .decompress_state = RTE_COMP_OP_STATELESS,
1611 .buff_type = LB_BOTH,
1612 .zlib_dir = ZLIB_DECOMPRESS,
1617 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1618 int_data.test_bufs = &compress_test_bufs[i];
1619 int_data.buf_idx = &i;
1621 /* Compress with compressdev, decompress with Zlib */
1622 test_data.zlib_dir = ZLIB_DECOMPRESS;
1623 ret = test_deflate_comp_decomp(&int_data, &test_data);
1627 /* Compress with Zlib, decompress with compressdev */
1628 test_data.zlib_dir = ZLIB_COMPRESS;
1629 ret = test_deflate_comp_decomp(&int_data, &test_data);
1637 rte_free(compress_xform);
/*
 * Verify stateless DEFLATE with all test buffers submitted in one burst
 * (multiple operations in flight at once), using the default xforms.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error paths); visible code is kept unchanged.
 */
test_compressdev_deflate_stateless_multi_op(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
	uint16_t buf_idx[num_bufs];

	/* Each op maps 1:1 onto its test buffer index */
	for (i = 0; i < num_bufs; i++)

	struct interim_data_params int_data = {
		&ts_params->def_comp_xform,
		&ts_params->def_decomp_xform,

	/* Linear mbufs, stateless in both directions */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,

	/* Compress with compressdev, decompress with Zlib */
	test_data.zlib_dir = ZLIB_DECOMPRESS;
	ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Compress with Zlib, decompress with compressdev */
	test_data.zlib_dir = ZLIB_COMPRESS;
	ret = test_deflate_comp_decomp(&int_data, &test_data);

	return TEST_SUCCESS;
/*
 * Verify stateless DEFLATE across every compression level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX, PMD-compress / zlib-decompress.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations, loop increment and error paths); visible code is kept
 * unchanged.
 */
test_compressdev_deflate_stateless_multi_level(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	struct rte_comp_xform *compress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);

	if (compress_xform == NULL) {
			"Compress xform could not be created\n");

	/* Start from the default compress xform; level is varied below */
	memcpy(compress_xform, ts_params->def_comp_xform,
			sizeof(struct rte_comp_xform));

	struct interim_data_params int_data = {
		&ts_params->def_decomp_xform,

	/* Linear mbufs, stateless in both directions */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,

	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
		int_data.test_bufs = &compress_test_bufs[i];
		int_data.buf_idx = &i;

		/* Sweep every supported compression level */
		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
			compress_xform->compress.level = level;
			/* Compress with compressdev, decompress with Zlib */
			test_data.zlib_dir = ZLIB_DECOMPRESS;
			ret = test_deflate_comp_decomp(&int_data, &test_data);

	rte_free(compress_xform);
/* Number of distinct xform pairs exercised by the multi-xform test */
#define NUM_XFORMS 3

/*
 * Verify stateless DEFLATE when several ops in the same burst use
 * different private xforms: NUM_XFORMS compress/decompress xform pairs
 * are created with differing levels and all applied to the same buffer.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations, the level increment and error/exit paths); visible code
 * is kept unchanged.
 */
test_compressdev_deflate_stateless_multi_xform(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	uint16_t num_bufs = NUM_XFORMS;
	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
	const char *test_buffers[NUM_XFORMS];
	unsigned int level = RTE_COMP_LEVEL_MIN;
	uint16_t buf_idx[num_bufs];

	/* Create multiple xforms with various levels */
	for (i = 0; i < NUM_XFORMS; i++) {
		compress_xforms[i] = rte_malloc(NULL,
				sizeof(struct rte_comp_xform), 0);
		if (compress_xforms[i] == NULL) {
				"Compress xform could not be created\n");

		memcpy(compress_xforms[i], ts_params->def_comp_xform,
				sizeof(struct rte_comp_xform));
		compress_xforms[i]->compress.level = level;

		decompress_xforms[i] = rte_malloc(NULL,
				sizeof(struct rte_comp_xform), 0);
		if (decompress_xforms[i] == NULL) {
				"Decompress xform could not be created\n");

		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
				sizeof(struct rte_comp_xform));

	for (i = 0; i < NUM_XFORMS; i++) {
		/* Use the same buffer in all sessions */
		test_buffers[i] = compress_test_bufs[0];

	struct interim_data_params int_data = {

	/* Linear mbufs, stateless in both directions */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,

	/* Compress with compressdev, decompress with Zlib */
	ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Free every per-level xform pair */
	for (i = 0; i < NUM_XFORMS; i++) {
		rte_free(compress_xforms[i]);
		rte_free(decompress_xforms[i]);
/*
 * Verify stateless DEFLATE with scatter-gather (chained) mbufs:
 * SGL in / SGL out, plus SGL->linear and linear->SGL variants when the
 * corresponding feature flags are advertised by the PMD.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error paths); visible code is kept unchanged.
 */
test_compressdev_deflate_stateless_sgl(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	const struct rte_compressdev_capabilities *capab;

	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	/* Skip when chained input + chained output is unsupported */
	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)

	struct interim_data_params int_data = {
		&ts_params->def_comp_xform,
		&ts_params->def_decomp_xform,

	/* Chained mbufs on both sides, stateless in both directions */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = SGL_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,

	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
		int_data.test_bufs = &compress_test_bufs[i];
		int_data.buf_idx = &i;

		/* Compress with compressdev, decompress with Zlib */
		test_data.zlib_dir = ZLIB_DECOMPRESS;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

		/* Compress with Zlib, decompress with compressdev */
		test_data.zlib_dir = ZLIB_COMPRESS;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Optional variant: chained input, linear output */
	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
		/* Compress with compressdev, decompress with Zlib */
		test_data.zlib_dir = ZLIB_DECOMPRESS;
		test_data.buff_type = SGL_TO_LB;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

		/* Compress with Zlib, decompress with compressdev */
		test_data.zlib_dir = ZLIB_COMPRESS;
		test_data.buff_type = SGL_TO_LB;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Optional variant: linear input, chained output */
	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
		/* Compress with compressdev, decompress with Zlib */
		test_data.zlib_dir = ZLIB_DECOMPRESS;
		test_data.buff_type = LB_TO_SGL;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

		/* Compress with Zlib, decompress with compressdev */
		test_data.zlib_dir = ZLIB_COMPRESS;
		test_data.buff_type = LB_TO_SGL;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

	return TEST_SUCCESS;
/*
 * Verify checksum generation for stateless DEFLATE: exercises CRC32,
 * Adler32 and combined CRC32+Adler32 checksums, each guarded by the
 * matching capability flag. For CRC32/Adler32 the PMD checksum is also
 * cross-checked against the one zlib produces; the combined checksum is
 * PMD-only (ZLIB_NONE) since zlib cannot generate it.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error/exit paths); visible code is kept unchanged.
 */
test_compressdev_deflate_stateless_checksum(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	const struct rte_compressdev_capabilities *capab;

	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	/* Check if driver supports any checksum */
	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
			(capab->comp_feature_flags &
			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
			(capab->comp_feature_flags &
			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)

	/* Local xform copies so the checksum type can be switched per case */
	struct rte_comp_xform *compress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
	if (compress_xform == NULL) {
		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");

	memcpy(compress_xform, ts_params->def_comp_xform,
			sizeof(struct rte_comp_xform));

	struct rte_comp_xform *decompress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
	if (decompress_xform == NULL) {
		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
		rte_free(compress_xform);

	memcpy(decompress_xform, ts_params->def_decomp_xform,
			sizeof(struct rte_comp_xform));

	struct interim_data_params int_data = {

	/* Linear mbufs, stateless in both directions */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,

	/* Check if driver supports crc32 checksum and test */
	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;

		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
			/* Compress with compressdev, decompress with Zlib */
			int_data.test_bufs = &compress_test_bufs[i];
			int_data.buf_idx = &i;

			/* Generate zlib checksum and test against selected
			 * drivers decompression checksum
			 */
			test_data.zlib_dir = ZLIB_COMPRESS;
			ret = test_deflate_comp_decomp(&int_data, &test_data);

			/* Generate compression and decompression
			 * checksum of selected driver
			 */
			test_data.zlib_dir = ZLIB_NONE;
			ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Check if driver supports adler32 checksum and test */
	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;

		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
			int_data.test_bufs = &compress_test_bufs[i];
			int_data.buf_idx = &i;

			/* Generate zlib checksum and test against selected
			 * drivers decompression checksum
			 */
			test_data.zlib_dir = ZLIB_COMPRESS;
			ret = test_deflate_comp_decomp(&int_data, &test_data);
			/* Generate compression and decompression
			 * checksum of selected driver
			 */
			test_data.zlib_dir = ZLIB_NONE;
			ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Check if driver supports combined crc and adler checksum and test */
	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
		compress_xform->compress.chksum =
				RTE_COMP_CHECKSUM_CRC32_ADLER32;
		decompress_xform->decompress.chksum =
				RTE_COMP_CHECKSUM_CRC32_ADLER32;

		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
			int_data.test_bufs = &compress_test_bufs[i];
			int_data.buf_idx = &i;

			/* Generate compression and decompression
			 * checksum of selected driver
			 */
			test_data.zlib_dir = ZLIB_NONE;
			ret = test_deflate_comp_decomp(&int_data, &test_data);

	rte_free(compress_xform);
	rte_free(decompress_xform);
/*
 * Negative test: submit operations whose destination buffer is too small
 * (test_data.out_of_space = 1) and expect the PMD to report the
 * out-of-space status rather than succeed. Runs with linear buffers and,
 * when supported, with chained (SGL) buffers.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error/exit paths); visible code is kept unchanged.
 */
test_compressdev_out_of_space_buffer(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	const struct rte_compressdev_capabilities *capab;

	RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");

	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	/* Skip when fixed Huffman is unsupported */
	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)

	struct interim_data_params int_data = {
		&compress_test_bufs[0],
		&ts_params->def_comp_xform,
		&ts_params->def_decomp_xform,

	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,
		.out_of_space = 1, /* run out-of-space test */

	/* Compress with compressdev, decompress with Zlib */
	test_data.zlib_dir = ZLIB_DECOMPRESS;
	ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Compress with Zlib, decompress with compressdev */
	test_data.zlib_dir = ZLIB_COMPRESS;
	ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Repeat both directions with chained buffers when supported */
	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
		/* Compress with compressdev, decompress with Zlib */
		test_data.zlib_dir = ZLIB_DECOMPRESS;
		test_data.buff_type = SGL_BOTH;
		ret = test_deflate_comp_decomp(&int_data, &test_data);

		/* Compress with Zlib, decompress with compressdev */
		test_data.zlib_dir = ZLIB_COMPRESS;
		test_data.buff_type = SGL_BOTH;
		ret = test_deflate_comp_decomp(&int_data, &test_data);
/*
 * Verify stateless DEFLATE with dynamic Huffman on a large
 * (BIG_DATA_TEST_SIZE) pseudo-random buffer, using chained mbufs.
 * Requires both RTE_COMP_FF_HUFFMAN_DYNAMIC and
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT. The default compress xform is
 * temporarily switched to dynamic Huffman and restored before returning.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error/exit paths); visible code is kept unchanged.
 */
test_compressdev_deflate_stateless_dynamic_big(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	const struct rte_compressdev_capabilities *capab;
	char *test_buffer = NULL;

	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)

	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)

	test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
	if (test_buffer == NULL) {
			"Can't allocate buffer for big-data\n");

	struct interim_data_params int_data = {
		(const char * const *)&test_buffer,
		&ts_params->def_comp_xform,
		&ts_params->def_decomp_xform,

	/* Chained mbufs on both sides, stateless in both directions */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = SGL_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,

	/* Temporarily force dynamic Huffman on the shared default xform */
	ts_params->def_comp_xform->compress.deflate.huffman =
						RTE_COMP_HUFFMAN_DYNAMIC;

	/* fill the buffer with data based on rand. data */
	srand(BIG_DATA_TEST_SIZE);
	/* OR with 1 keeps every byte non-zero so the buffer stays a valid
	 * NUL-terminated C string for strlen()-based length handling
	 */
	for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
		test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
	test_buffer[BIG_DATA_TEST_SIZE-1] = 0;

	/* Compress with compressdev, decompress with Zlib */
	test_data.zlib_dir = ZLIB_DECOMPRESS;
	ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Compress with Zlib, decompress with compressdev */
	test_data.zlib_dir = ZLIB_COMPRESS;
	ret = test_deflate_comp_decomp(&int_data, &test_data);

	/* Restore the default Huffman setting before returning */
	ts_params->def_comp_xform->compress.deflate.huffman =
						RTE_COMP_HUFFMAN_DEFAULT;
	rte_free(test_buffer);
/*
 * Verify stateful decompression: data is compressed with zlib, then
 * decompressed by the PMD in multiple steps (2000-byte output blocks,
 * up to 4 steps). Skipped without RTE_COMP_FF_STATEFUL_DECOMPRESSION;
 * also re-run with chained buffers when SGL is supported.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error paths); visible code is kept unchanged.
 */
test_compressdev_deflate_stateful_decomp(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	const struct rte_compressdev_capabilities *capab;

	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))

	struct interim_data_params int_data = {
		&compress_test_bufs[0],
		&ts_params->def_comp_xform,
		&ts_params->def_decomp_xform,

	/* Stateless compress, stateful multi-step decompress */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATEFUL,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_COMPRESS,
		.decompress_output_block_size = 2000,
		.decompress_steps_max = 4

	/* Compress with Zlib, decompress with compressdev */
	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {

	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
		/* Now test with SGL buffers */
		test_data.buff_type = SGL_BOTH;
		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/*
 * Verify checksum generation during stateful decompression: exercises
 * CRC32, Adler32 and combined CRC32+Adler32, each guarded by its
 * capability flag, with linear buffers and (when supported) SGL buffers.
 * The combined checksum runs PMD-only (ZLIB_NONE) since zlib cannot
 * generate it. Requires RTE_COMP_FF_STATEFUL_DECOMPRESSION.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error/exit paths); visible code is kept unchanged.
 */
test_compressdev_deflate_stateful_decomp_checksum(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	const struct rte_compressdev_capabilities *capab;

	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))

	/* Check if driver supports any checksum */
	if (!(capab->comp_feature_flags &
			(RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))

	/* Local xform copies so the checksum type can be switched per case */
	struct rte_comp_xform *compress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
	if (compress_xform == NULL) {
		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");

	memcpy(compress_xform, ts_params->def_comp_xform,
			sizeof(struct rte_comp_xform));

	struct rte_comp_xform *decompress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
	if (decompress_xform == NULL) {
		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
		rte_free(compress_xform);

	memcpy(decompress_xform, ts_params->def_decomp_xform,
			sizeof(struct rte_comp_xform));

	struct interim_data_params int_data = {
		&compress_test_bufs[0],

	/* Stateless compress, stateful multi-step decompress */
	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATEFUL,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_COMPRESS,
		.decompress_output_block_size = 2000,
		.decompress_steps_max = 4

	/* Check if driver supports crc32 checksum and test */
	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
		/* Compress with Zlib, decompress with compressdev */
		test_data.buff_type = LB_BOTH;
		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {

		if (capab->comp_feature_flags &
				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
			/* Now test with SGL buffers */
			test_data.buff_type = SGL_BOTH;
			if (test_deflate_comp_decomp(&int_data,

	/* Check if driver supports adler32 checksum and test */
	if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
		/* Compress with Zlib, decompress with compressdev */
		test_data.buff_type = LB_BOTH;
		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {

		if (capab->comp_feature_flags &
				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
			/* Now test with SGL buffers */
			test_data.buff_type = SGL_BOTH;
			if (test_deflate_comp_decomp(&int_data,

	/* Check if driver supports combined crc and adler checksum and test */
	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
		compress_xform->compress.chksum =
				RTE_COMP_CHECKSUM_CRC32_ADLER32;
		decompress_xform->decompress.chksum =
				RTE_COMP_CHECKSUM_CRC32_ADLER32;

		/* Zlib doesn't support combined checksum */
		test_data.zlib_dir = ZLIB_NONE;
		/* Compress stateless, decompress stateful with compressdev */
		test_data.buff_type = LB_BOTH;
		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {

		if (capab->comp_feature_flags &
				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
			/* Now test with SGL buffers */
			test_data.buff_type = SGL_BOTH;
			if (test_deflate_comp_decomp(&int_data,

	rte_free(compress_xform);
	rte_free(decompress_xform);
/*
 * Look up or create an IOVA-contiguous memzone named "<name>_<socket_id>"
 * of exactly @size bytes, for use as an external mbuf data area.
 * A pre-existing zone with the same name but a different length is freed
 * and re-reserved. Logs an error (and the lookup/reserve result is NULL)
 * on allocation failure.
 * NOTE(review): extraction elided a few lines here (closing braces and,
 * presumably, resetting the freed pointer to NULL before the re-reserve
 * check — confirm against the full file).
 */
static const struct rte_memzone *
make_memzone(const char *name, size_t size)
	unsigned int socket_id = rte_socket_id();
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;

	/* Name is per-socket so lookups on other sockets don't collide */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id);
	memzone = rte_memzone_lookup(mz_name);

	/* Stale zone of a different size cannot be reused: free it */
	if (memzone != NULL && memzone->len != size) {
		rte_memzone_free(memzone);

	if (memzone == NULL) {
		memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id,
				RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
		if (memzone == NULL)
			RTE_LOG(ERR, USER1, "Can't allocate memory zone %s",
/*
 * Verify compression/decompression with external mbuf buffers: mbuf data
 * areas are attached to pre-reserved memzones (input, compressed output
 * sized by COMPRESS_BUF_SIZE_RATIO, and decompressed output) instead of
 * pool-backed segments. Each test buffer is copied into the input
 * memzone and run in both PMD/zlib directions. All three memzones are
 * freed before returning.
 * NOTE(review): extraction elided some lines of this function (braces,
 * declarations and error/exit paths); visible code is kept unchanged.
 */
test_compressdev_external_mbufs(void)
	struct comp_testsuite_params *ts_params = &testsuite_params;
	size_t data_len = 0;
	int ret = TEST_FAILED;

	/* Size the shared memzones for the largest test buffer (+NUL) */
	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
		data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1);

	struct interim_data_params int_data = {
		&ts_params->def_comp_xform,
		&ts_params->def_decomp_xform,

	struct test_data_params test_data = {
		.compress_state = RTE_COMP_OP_STATELESS,
		.decompress_state = RTE_COMP_OP_STATELESS,
		.buff_type = LB_BOTH,
		.zlib_dir = ZLIB_DECOMPRESS,
		.use_external_mbufs = 1,
		.inbuf_data_size = data_len,
		.inbuf_memzone = make_memzone("inbuf", data_len),
		.compbuf_memzone = make_memzone("compbuf", data_len *
						COMPRESS_BUF_SIZE_RATIO),
		.uncompbuf_memzone = make_memzone("decompbuf", data_len)

	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
		/* prepare input data */
		data_len = strlen(compress_test_bufs[i]) + 1;
		rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i],
		test_data.inbuf_data_size = data_len;
		int_data.buf_idx = &i;

		/* Compress with compressdev, decompress with Zlib */
		test_data.zlib_dir = ZLIB_DECOMPRESS;
		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)

		/* Compress with Zlib, decompress with compressdev */
		test_data.zlib_dir = ZLIB_COMPRESS;
		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)

	/* Release the shared memzones regardless of test outcome */
	rte_memzone_free(test_data.inbuf_memzone);
	rte_memzone_free(test_data.compbuf_memzone);
	rte_memzone_free(test_data.uncompbuf_memzone);
/*
 * Test-suite descriptor: registers every compressdev unit test with the
 * DPDK test runner. All cases except the invalid-configuration test use
 * generic_ut_setup/generic_ut_teardown to (re)initialize the device.
 */
static struct unit_test_suite compressdev_testsuite  = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		/* No per-case setup: exercises configuration error paths */
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic_big),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_out_of_space_buffer),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateful_decomp),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateful_decomp_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_external_mbufs),
		TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point: run the whole compressdev suite via the generic runner */
test_compressdev(void)
	return unit_test_suite_runner(&compressdev_testsuite);

/* Expose the suite as the "compressdev_autotest" test command */
REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);