1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
17 #include "test_compressdev_test_buffer.h"
/* Integer ceiling division; correct for non-negative a and positive b. */
20 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
/* DEFLATE defaults: 2^15 window, zlib memLevel 8 (zlib's own defaults). */
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
/* Dequeue retry policy: up to 10 retries, 10 ms (10000 us) apart. */
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
28 * 30% extra size for compressed data compared to original data,
29 * in case data size cannot be reduced and it is actually bigger
30 * due to the compress block headers
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
34 #define SMALL_SEG_SIZE 256
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
/*
 * zlib windowBits encodings: 31 = 15-bit window + gzip (CRC32) wrapper;
 * header/trailer sizes below are stripped to emulate raw DEFLATE output.
 */
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
/* 1-byte destination buffer used to force an out-of-space error. */
47 #define OUT_OF_SPACE_BUF 1
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
/* Half the big pool's capacity, so both src and dst chains fit. */
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
/* Human-readable names for rte_comp_huffman values, used in debug logs. */
55 huffman_type_strings[] = {
56 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
57 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
58 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Buffer layout under test: linear (LB) vs. scatter-gather list (SGL). */
69 LB_BOTH = 0, /* both input and output are linear*/
70 SGL_BOTH, /* both input and output are chained */
71 SGL_TO_LB, /* input buffer is chained */
72 LB_TO_SGL /* output buffer is chained */
/*
 * Global suite state: the mempools and default xforms created once in
 * testsuite_setup() and released in testsuite_teardown().
 */
79 struct comp_testsuite_params {
/* Linear buffers sized to hold any whole test string (plus ratio margin). */
80 struct rte_mempool *large_mbuf_pool;
/* SMALL_SEG_SIZE segments for building SGL chains. */
81 struct rte_mempool *small_mbuf_pool;
/* Max-size segments for the big-data SGL tests. */
82 struct rte_mempool *big_mbuf_pool;
83 struct rte_mempool *op_pool;
/* Default compress/decompress xforms shared by most test cases. */
84 struct rte_comp_xform *def_comp_xform;
85 struct rte_comp_xform *def_decomp_xform;
/*
 * Per-run input data: the test strings and the xforms to apply to them.
 * Xforms are cycled over buffers (buffer i uses xform i % num_xforms).
 */
88 struct interim_data_params {
89 const char * const *test_bufs;
90 unsigned int num_bufs;
92 struct rte_comp_xform **compress_xforms;
93 struct rte_comp_xform **decompress_xforms;
94 unsigned int num_xforms;
/*
 * Per-run test configuration: op type, buffer layout, which side (if any)
 * runs through zlib directly instead of the PMD, and special-case flags.
 */
97 struct test_data_params {
98 enum rte_comp_op_type state;
99 enum varied_buff buff_type;
100 enum zlib_direction zlib_dir;
/* Non-zero: force a 1-byte destination and expect OUT_OF_SPACE status. */
101 unsigned int out_of_space;
/* Non-zero: use the big mbuf pool / multi-segment sizes. */
102 unsigned int big_data;
/* Single global suite state instance; populated by testsuite_setup(). */
105 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite-level teardown: report any still-allocated mbufs/ops (leak
 * detection for the tests themselves), then free all pools and xforms.
 * rte_mempool_free()/rte_free() accept NULL, so partial setup is safe here.
 */
108 testsuite_teardown(void)
110 struct comp_testsuite_params *ts_params = &testsuite_params;
112 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
113 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
114 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
115 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
116 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
117 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
118 if (rte_mempool_in_use_count(ts_params->op_pool))
119 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
121 rte_mempool_free(ts_params->large_mbuf_pool);
122 rte_mempool_free(ts_params->small_mbuf_pool);
123 rte_mempool_free(ts_params->big_mbuf_pool);
124 rte_mempool_free(ts_params->op_pool);
125 rte_free(ts_params->def_comp_xform);
126 rte_free(ts_params->def_decomp_xform);
/*
 * Suite-level setup: requires at least one compressdev; creates the three
 * mbuf pools (large/linear, small/SGL segments, big/max-size segments),
 * the comp-op pool, and the default DEFLATE compress/decompress xforms.
 * On any failure it falls through to testsuite_teardown() for cleanup.
 */
130 testsuite_setup(void)
132 struct comp_testsuite_params *ts_params = &testsuite_params;
133 uint32_t max_buf_size = 0;
136 if (rte_compressdev_count() == 0) {
137 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
141 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
142 rte_compressdev_name_get(0));
/* Size the large pool for the longest test string incl. NUL terminator. */
144 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
145 max_buf_size = RTE_MAX(max_buf_size,
146 strlen(compress_test_bufs[i]) + 1);
149 * Buffers to be used in compression and decompression.
150 * Since decompressed data might be larger than
151 * compressed data (due to block header),
152 * buffers should be big enough for both cases.
154 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
155 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
158 max_buf_size + RTE_PKTMBUF_HEADROOM,
160 if (ts_params->large_mbuf_pool == NULL) {
161 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
165 /* Create mempool with smaller buffers for SGL testing */
166 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
167 NUM_LARGE_MBUFS * MAX_SEGS,
169 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
171 if (ts_params->small_mbuf_pool == NULL) {
172 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
176 /* Create mempool with big buffers for SGL testing */
177 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
180 MAX_MBUF_SEGMENT_SIZE,
182 if (ts_params->big_mbuf_pool == NULL) {
183 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* Op pool reserves room for struct priv_op_data after each op. */
187 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
188 0, sizeof(struct priv_op_data),
190 if (ts_params->op_pool == NULL) {
191 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
195 ts_params->def_comp_xform =
196 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
197 if (ts_params->def_comp_xform == NULL) {
199 "Default compress xform could not be created\n");
202 ts_params->def_decomp_xform =
203 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
204 if (ts_params->def_decomp_xform == NULL) {
206 "Default decompress xform could not be created\n");
210 /* Initializes default values for compress/decompress xforms */
211 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing comma here is the comma operator, not a typo
 * that changes behavior — but it should be a semicolon. */
212 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
213 ts_params->def_comp_xform->compress.deflate.huffman =
214 RTE_COMP_HUFFMAN_DEFAULT;
215 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
216 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
217 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
219 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same comma-for-semicolon typo as above. */
220 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
221 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
222 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
227 testsuite_teardown();
/*
 * Per-test setup: configure device 0 with one queue pair on the local
 * socket, set up queue pair 0 with NUM_MAX_INFLIGHT_OPS descriptors,
 * and start the device.
 */
233 generic_ut_setup(void)
235 /* Configure compressdev (one device, one queue pair) */
236 struct rte_compressdev_config config = {
237 .socket_id = rte_socket_id(),
239 .max_nb_priv_xforms = NUM_MAX_XFORMS,
243 if (rte_compressdev_configure(0, &config) < 0) {
244 RTE_LOG(ERR, USER1, "Device configuration failed\n");
248 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
249 rte_socket_id()) < 0) {
250 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
254 if (rte_compressdev_start(0) < 0) {
255 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0, logging close failures. */
263 generic_ut_teardown(void)
265 rte_compressdev_stop(0);
266 if (rte_compressdev_close(0) < 0)
267 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: device configuration must be rejected for 0 queue pairs
 * and for more queue pairs than the device advertises; queue pair setup
 * must fail when the device has no queue pairs configured.
 */
271 test_compressdev_invalid_configuration(void)
273 struct rte_compressdev_config invalid_config;
274 struct rte_compressdev_config valid_config = {
275 .socket_id = rte_socket_id(),
277 .max_nb_priv_xforms = NUM_MAX_XFORMS,
280 struct rte_compressdev_info dev_info;
282 /* Invalid configuration with 0 queue pairs */
283 memcpy(&invalid_config, &valid_config,
284 sizeof(struct rte_compressdev_config));
285 invalid_config.nb_queue_pairs = 0;
287 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
288 "Device configuration was successful "
289 "with no queue pairs (invalid)\n");
292 * Invalid configuration with too many queue pairs
293 * (if there is an actual maximum number of queue pairs)
295 rte_compressdev_info_get(0, &dev_info);
/* max_nb_queue_pairs == 0 means "no limit", so skip that case. */
296 if (dev_info.max_nb_queue_pairs != 0) {
297 memcpy(&invalid_config, &valid_config,
298 sizeof(struct rte_compressdev_config));
299 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
301 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
302 "Device configuration was successful "
303 "with too many queue pairs (invalid)\n");
306 /* Invalid queue pair setup, with no number of queue pairs set */
307 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
308 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
309 "Queue pair setup was successful "
310 "with no queue pairs set (invalid)\n");
/*
 * Compare two buffers by length and content, logging which check failed.
 * Used to verify decompressed output matches the original test string.
 */
316 compare_buffers(const char *buffer1, uint32_t buffer1_len,
317 const char *buffer2, uint32_t buffer2_len)
319 if (buffer1_len != buffer2_len) {
320 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
324 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
325 RTE_LOG(ERR, USER1, "Buffers are different\n");
333 * Maps compressdev and Zlib flush flags
/* Translate rte_comp_flush_flag to the corresponding zlib flush value. */
336 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
339 case RTE_COMP_FLUSH_NONE:
341 case RTE_COMP_FLUSH_SYNC:
343 case RTE_COMP_FLUSH_FULL:
345 case RTE_COMP_FLUSH_FINAL:
348 * There should be only the values above,
349 * so this should never happen
/*
 * Reference compression path: compress op->m_src into op->m_dst using
 * zlib's deflate directly (no PMD), mirroring a stateless compressdev op.
 * Chained (multi-segment) mbufs are flattened into temporary linear
 * buffers first, and SGL output is copied back segment by segment.
 * On success fills op->consumed, op->produced, op->status and
 * op->output_chksum.  zlib/gzip wrapper bytes are stripped so the output
 * looks like raw DEFLATE plus the requested checksum semantics.
 */
357 compress_zlib(struct rte_comp_op *op,
358 const struct rte_comp_xform *xform, int mem_level)
362 int strategy, window_bits, comp_level;
363 int ret = TEST_FAILED;
364 uint8_t *single_src_buf = NULL;
365 uint8_t *single_dst_buf = NULL;
367 /* initialize zlib stream */
368 stream.zalloc = Z_NULL;
369 stream.zfree = Z_NULL;
370 stream.opaque = Z_NULL;
372 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
375 strategy = Z_DEFAULT_STRATEGY;
378 * Window bits is the base two logarithm of the window size (in bytes).
379 * When doing raw DEFLATE, this number will be negative.
381 window_bits = -(xform->compress.window_size);
/* Checksummed modes use zlib (adler32) or gzip (crc32) wrappers instead. */
382 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
384 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
385 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
387 comp_level = xform->compress.level;
389 if (comp_level != RTE_COMP_LEVEL_NONE)
390 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
391 window_bits, mem_level, strategy);
393 ret = deflateInit(&stream, Z_NO_COMPRESSION);
396 printf("Zlib deflate could not be initialized\n");
400 /* Assuming stateless operation */
/* Flatten a chained source mbuf into one contiguous input buffer. */
402 if (op->m_src->nb_segs > 1) {
403 single_src_buf = rte_malloc(NULL,
404 rte_pktmbuf_pkt_len(op->m_src), 0);
405 if (single_src_buf == NULL) {
406 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
410 if (rte_pktmbuf_read(op->m_src, op->src.offset,
411 rte_pktmbuf_pkt_len(op->m_src) -
413 single_src_buf) == NULL) {
415 "Buffer could not be read entirely\n");
419 stream.avail_in = op->src.length;
420 stream.next_in = single_src_buf;
423 stream.avail_in = op->src.length;
424 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: deflate into a temp buffer, scatter later. */
428 if (op->m_dst->nb_segs > 1) {
430 single_dst_buf = rte_malloc(NULL,
431 rte_pktmbuf_pkt_len(op->m_dst), 0);
432 if (single_dst_buf == NULL) {
434 "Buffer could not be allocated\n");
438 stream.avail_out = op->m_dst->pkt_len;
439 stream.next_out = single_dst_buf;
441 } else {/* linear output */
442 stream.avail_out = op->m_dst->data_len;
443 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
447 /* Stateless operation, all buffer will be compressed in one go */
448 zlib_flush = map_zlib_flush_flag(op->flush_flag);
449 ret = deflate(&stream, zlib_flush);
451 if (stream.avail_in != 0) {
452 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
456 if (ret != Z_STREAM_END)
459 /* Copy data to destination SGL */
460 if (op->m_dst->nb_segs > 1) {
461 uint32_t remaining_data = stream.total_out;
462 uint8_t *src_data = single_dst_buf;
463 struct rte_mbuf *dst_buf = op->m_dst;
465 while (remaining_data > 0) {
466 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
467 uint8_t *, op->dst.offset);
/* Last (partial) segment: copy the remainder and stop. */
469 if (remaining_data < dst_buf->data_len) {
470 memcpy(dst_data, src_data, remaining_data);
473 memcpy(dst_data, src_data, dst_buf->data_len);
474 remaining_data -= dst_buf->data_len;
475 src_data += dst_buf->data_len;
476 dst_buf = dst_buf->next;
481 op->consumed = stream.total_in;
/* Strip zlib/gzip header+trailer so produced reflects raw DEFLATE size. */
482 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
483 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
484 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
485 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
487 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
488 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
489 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
490 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
493 op->produced = stream.total_out;
495 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* stream.adler holds adler32 or crc32 depending on the wrapper mode. */
496 op->output_chksum = stream.adler;
498 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so unconditional cleanup is safe. */
503 rte_free(single_src_buf);
504 rte_free(single_dst_buf);
/*
 * Reference decompression path: inflate op->m_src into op->m_dst using
 * zlib directly (no PMD).  Chained source mbufs are flattened into a
 * temporary buffer before inflating; the result is scattered back into
 * the destination chain.  Fills op->consumed/produced/status on success.
 */
510 decompress_zlib(struct rte_comp_op *op,
511 const struct rte_comp_xform *xform)
516 int ret = TEST_FAILED;
517 uint8_t *single_src_buf = NULL;
518 uint8_t *single_dst_buf = NULL;
520 /* initialize zlib stream */
521 stream.zalloc = Z_NULL;
522 stream.zfree = Z_NULL;
523 stream.opaque = Z_NULL;
526 * Window bits is the base two logarithm of the window size (in bytes).
527 * When doing raw DEFLATE, this number will be negative.
529 window_bits = -(xform->decompress.window_size);
530 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate init path. */
533 printf("Zlib deflate could not be initialized\n");
537 /* Assuming stateless operation */
539 if (op->m_src->nb_segs > 1) {
540 single_src_buf = rte_malloc(NULL,
541 rte_pktmbuf_pkt_len(op->m_src), 0);
542 if (single_src_buf == NULL) {
543 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
546 single_dst_buf = rte_malloc(NULL,
547 rte_pktmbuf_pkt_len(op->m_dst), 0);
548 if (single_dst_buf == NULL) {
549 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
552 if (rte_pktmbuf_read(op->m_src, 0,
553 rte_pktmbuf_pkt_len(op->m_src),
554 single_src_buf) == NULL) {
556 "Buffer could not be read entirely\n");
560 stream.avail_in = op->src.length;
561 stream.next_in = single_src_buf;
562 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
563 stream.next_out = single_dst_buf;
566 stream.avail_in = op->src.length;
567 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
568 stream.avail_out = op->m_dst->data_len;
569 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
572 /* Stateless operation, all buffer will be compressed in one go */
573 zlib_flush = map_zlib_flush_flag(op->flush_flag);
574 ret = inflate(&stream, zlib_flush);
576 if (stream.avail_in != 0) {
577 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
581 if (ret != Z_STREAM_END)
/* Scatter the inflated data from the temp buffer into the dst chain. */
584 if (op->m_src->nb_segs > 1) {
585 uint32_t remaining_data = stream.total_out;
586 uint8_t *src_data = single_dst_buf;
587 struct rte_mbuf *dst_buf = op->m_dst;
589 while (remaining_data > 0) {
590 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
593 if (remaining_data < dst_buf->data_len) {
594 memcpy(dst_data, src_data, remaining_data);
597 memcpy(dst_data, src_data, dst_buf->data_len);
598 remaining_data -= dst_buf->data_len;
599 src_data += dst_buf->data_len;
600 dst_buf = dst_buf->next;
605 op->consumed = stream.total_in;
606 op->produced = stream.total_out;
607 op->status = RTE_COMP_OP_STATUS_SUCCESS;
609 inflateReset(&stream);
/*
 * Build a scatter-gather mbuf chain of total_data_size bytes hanging off
 * head_buf, in segments of seg_size.  If test_buf is non-NULL its bytes
 * are copied into the chain; otherwise segments are only sized (for
 * destination buffers).  limit_segs_in_sgl caps the chain length (0 = no
 * cap); when capped, the final segment absorbs all remaining data and is
 * drawn from large_mbuf_pool if it exceeds seg_size.
 */
619 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
620 uint32_t total_data_size,
621 struct rte_mempool *small_mbuf_pool,
622 struct rte_mempool *large_mbuf_pool,
623 uint8_t limit_segs_in_sgl,
626 uint32_t remaining_data = total_data_size;
627 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
628 struct rte_mempool *pool;
629 struct rte_mbuf *next_seg;
632 const char *data_ptr = test_buf;
636 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
637 num_remaining_segs = limit_segs_in_sgl - 1;
640 * Allocate data in the first segment (header) and
641 * copy data if test buffer is provided
643 if (remaining_data < seg_size)
644 data_size = remaining_data;
646 data_size = seg_size;
647 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
648 if (buf_ptr == NULL) {
650 "Not enough space in the 1st buffer\n");
654 if (data_ptr != NULL) {
655 /* Copy characters without NULL terminator */
656 strncpy(buf_ptr, data_ptr, data_size);
657 data_ptr += data_size;
659 remaining_data -= data_size;
660 num_remaining_segs--;
663 * Allocate the rest of the segments,
664 * copy the rest of the data and chain the segments.
666 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment takes everything left; pick a pool big enough for it. */
668 if (i == (num_remaining_segs - 1)) {
670 if (remaining_data > seg_size)
671 pool = large_mbuf_pool;
673 pool = small_mbuf_pool;
674 data_size = remaining_data;
676 data_size = seg_size;
677 pool = small_mbuf_pool;
680 next_seg = rte_pktmbuf_alloc(pool);
681 if (next_seg == NULL) {
683 "New segment could not be allocated "
684 "from the mempool\n");
687 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
688 if (buf_ptr == NULL) {
690 "Not enough space in the buffer\n");
691 rte_pktmbuf_free(next_seg);
694 if (data_ptr != NULL) {
695 /* Copy characters without NULL terminator */
696 strncpy(buf_ptr, data_ptr, data_size);
697 data_ptr += data_size;
699 remaining_data -= data_size;
701 ret = rte_pktmbuf_chain(head_buf, next_seg);
703 rte_pktmbuf_free(next_seg);
705 "Segment could not chained\n");
714 * Compresses and decompresses buffer with compressdev API and Zlib API
/*
 * Core test driver.  Pipeline:
 *   1) allocate/fill source mbufs (linear or SGL per buff_type)
 *   2) compress — via zlib directly or the PMD, per zlib_dir
 *   3) verify compression status (or expected out-of-space error)
 *   4) decompress the produced data the opposite way
 *   5) compare the round-tripped data and checksums with the originals
 * All mbufs, ops and private xforms are released on every exit path via
 * the shared cleanup at the end.  Returns TEST_SUCCESS/TEST_FAILED.
 */
717 test_deflate_comp_decomp(const struct interim_data_params *int_data,
718 const struct test_data_params *test_data)
720 struct comp_testsuite_params *ts_params = &testsuite_params;
721 const char * const *test_bufs = int_data->test_bufs;
722 unsigned int num_bufs = int_data->num_bufs;
723 uint16_t *buf_idx = int_data->buf_idx;
724 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
725 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
726 unsigned int num_xforms = int_data->num_xforms;
727 enum rte_comp_op_type state = test_data->state;
728 unsigned int buff_type = test_data->buff_type;
729 unsigned int out_of_space = test_data->out_of_space;
730 unsigned int big_data = test_data->big_data;
731 enum zlib_direction zlib_dir = test_data->zlib_dir;
732 int ret_status = TEST_FAILED;
734 struct rte_mbuf *uncomp_bufs[num_bufs];
735 struct rte_mbuf *comp_bufs[num_bufs];
736 struct rte_comp_op *ops[num_bufs];
737 struct rte_comp_op *ops_processed[num_bufs];
738 void *priv_xforms[num_bufs];
739 uint16_t num_enqd, num_deqd, num_total_deqd;
740 uint16_t num_priv_xforms = 0;
741 unsigned int deqd_retries = 0;
742 struct priv_op_data *priv_data;
745 struct rte_mempool *buf_pool;
747 /* Compressing with CompressDev */
748 unsigned int oos_zlib_decompress =
749 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
750 /* Decompressing with CompressDev */
751 unsigned int oos_zlib_compress =
752 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
753 const struct rte_compressdev_capabilities *capa =
754 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
755 char *contig_buf = NULL;
756 uint64_t compress_checksum[num_bufs];
760 "Compress device does not support DEFLATE\n");
764 /* Initialize all arrays to NULL */
765 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
766 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
767 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
768 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
769 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* Select the mbuf pool matching the requested buffer layout/size. */
772 buf_pool = ts_params->big_mbuf_pool;
773 else if (buff_type == SGL_BOTH)
774 buf_pool = ts_params->small_mbuf_pool;
776 buf_pool = ts_params->large_mbuf_pool;
778 /* Prepare the source mbufs with the data */
779 ret = rte_pktmbuf_alloc_bulk(buf_pool,
780 uncomp_bufs, num_bufs);
783 "Source mbufs could not be allocated "
784 "from the mempool\n");
788 if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
789 for (i = 0; i < num_bufs; i++) {
790 data_size = strlen(test_bufs[i]) + 1;
791 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
793 big_data ? buf_pool : ts_params->small_mbuf_pool,
794 big_data ? buf_pool : ts_params->large_mbuf_pool,
795 big_data ? 0 : MAX_SEGS,
796 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
800 for (i = 0; i < num_bufs; i++) {
801 data_size = strlen(test_bufs[i]) + 1;
802 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
803 strlcpy(buf_ptr, test_bufs[i], data_size);
807 /* Prepare the destination mbufs */
808 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
811 "Destination mbufs could not be allocated "
812 "from the mempool\n");
816 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
817 for (i = 0; i < num_bufs; i++) {
/* Out-of-space test shrinks the dst to 1 byte on purpose. */
818 if (out_of_space == 1 && oos_zlib_decompress)
819 data_size = OUT_OF_SPACE_BUF;
821 (data_size = strlen(test_bufs[i]) *
822 COMPRESS_BUF_SIZE_RATIO);
824 if (prepare_sgl_bufs(NULL, comp_bufs[i],
826 big_data ? buf_pool : ts_params->small_mbuf_pool,
827 big_data ? buf_pool : ts_params->large_mbuf_pool,
828 big_data ? 0 : MAX_SEGS,
829 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
835 for (i = 0; i < num_bufs; i++) {
836 if (out_of_space == 1 && oos_zlib_decompress)
837 data_size = OUT_OF_SPACE_BUF;
839 (data_size = strlen(test_bufs[i]) *
840 COMPRESS_BUF_SIZE_RATIO);
842 rte_pktmbuf_append(comp_bufs[i], data_size);
846 /* Build the compression operations */
847 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
850 "Compress operations could not be allocated "
851 "from the mempool\n");
856 for (i = 0; i < num_bufs; i++) {
857 ops[i]->m_src = uncomp_bufs[i];
858 ops[i]->m_dst = comp_bufs[i];
859 ops[i]->src.offset = 0;
860 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
861 ops[i]->dst.offset = 0;
862 if (state == RTE_COMP_OP_STATELESS) {
863 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
866 "Stateful operations are not supported "
867 "in these tests yet\n");
870 ops[i]->input_chksum = 0;
872 * Store original operation index in private data,
873 * since ordering does not have to be maintained,
874 * when dequeueing from compressdev, so a comparison
875 * at the end of the test can be done.
877 priv_data = (struct priv_op_data *) (ops[i] + 1);
878 priv_data->orig_idx = i;
881 /* Compress data (either with Zlib API or compressdev API */
882 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
883 for (i = 0; i < num_bufs; i++) {
884 const struct rte_comp_xform *compress_xform =
885 compress_xforms[i % num_xforms];
886 ret = compress_zlib(ops[i], compress_xform,
891 ops_processed[i] = ops[i];
894 /* Create compress private xform data */
895 for (i = 0; i < num_xforms; i++) {
896 ret = rte_compressdev_private_xform_create(0,
897 (const struct rte_comp_xform *)compress_xforms[i],
901 "Compression private xform "
902 "could not be created\n");
908 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
909 /* Attach shareable private xform data to ops */
910 for (i = 0; i < num_bufs; i++)
911 ops[i]->private_xform = priv_xforms[i % num_xforms];
913 /* Create rest of the private xforms for the other ops */
914 for (i = num_xforms; i < num_bufs; i++) {
915 ret = rte_compressdev_private_xform_create(0,
916 compress_xforms[i % num_xforms],
920 "Compression private xform "
921 "could not be created\n");
927 /* Attach non shareable private xform data to ops */
928 for (i = 0; i < num_bufs; i++)
929 ops[i]->private_xform = priv_xforms[i];
932 /* Enqueue and dequeue all operations */
933 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
934 if (num_enqd < num_bufs) {
936 "The operations could not be enqueued\n");
943 * If retrying a dequeue call, wait for 10 ms to allow
944 * enough time to the driver to process the operations
946 if (deqd_retries != 0) {
948 * Avoid infinite loop if not all the
949 * operations get out of the device
951 if (deqd_retries == MAX_DEQD_RETRIES) {
953 "Not all operations could be "
957 usleep(DEQUEUE_WAIT_TIME);
959 num_deqd = rte_compressdev_dequeue_burst(0, 0,
960 &ops_processed[num_total_deqd], num_bufs);
961 num_total_deqd += num_deqd;
964 } while (num_total_deqd < num_enqd);
968 /* Free compress private xforms */
969 for (i = 0; i < num_priv_xforms; i++) {
970 rte_compressdev_private_xform_free(0, priv_xforms[i]);
971 priv_xforms[i] = NULL;
/* Debug-log per-buffer compression stats and capture checksums. */
976 for (i = 0; i < num_bufs; i++) {
977 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
978 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
979 const struct rte_comp_compress_xform *compress_xform =
980 &compress_xforms[xform_idx]->compress;
981 enum rte_comp_huffman huffman_type =
982 compress_xform->deflate.huffman;
983 char engine[] = "zlib (directly, not PMD)";
984 if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
985 strlcpy(engine, "PMD", sizeof(engine));
987 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
988 " %u bytes (level = %d, huffman = %s)\n",
989 buf_idx[priv_data->orig_idx], engine,
990 ops_processed[i]->consumed, ops_processed[i]->produced,
991 compress_xform->level,
992 huffman_type_strings[huffman_type]);
993 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
994 ops_processed[i]->consumed == 0 ? 0 :
995 (float)ops_processed[i]->produced /
996 ops_processed[i]->consumed * 100);
997 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
998 compress_checksum[i] = ops_processed[i]->output_chksum;
1003 * Check operation status and free source mbufs (destination mbuf and
1004 * compress operation information is needed for the decompression stage)
1006 for (i = 0; i < num_bufs; i++) {
1007 if (out_of_space && oos_zlib_decompress) {
1008 if (ops_processed[i]->status !=
1009 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1010 ret_status = TEST_FAILED;
1012 "Operation without expected out of "
1013 "space status error\n");
1019 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1021 "Some operations were not successful\n");
1024 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1025 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1026 uncomp_bufs[priv_data->orig_idx] = NULL;
/* Out-of-space (compress side) test ends here: status already checked. */
1029 if (out_of_space && oos_zlib_decompress) {
1030 ret_status = TEST_SUCCESS;
1034 /* Allocate buffers for decompressed data */
1035 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1038 "Destination mbufs could not be allocated "
1039 "from the mempool\n");
1043 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1044 for (i = 0; i < num_bufs; i++) {
1045 priv_data = (struct priv_op_data *)
1046 (ops_processed[i] + 1);
1047 if (out_of_space == 1 && oos_zlib_compress)
1048 data_size = OUT_OF_SPACE_BUF;
1051 strlen(test_bufs[priv_data->orig_idx]) + 1;
1053 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1055 big_data ? buf_pool : ts_params->small_mbuf_pool,
1056 big_data ? buf_pool : ts_params->large_mbuf_pool,
1057 big_data ? 0 : MAX_SEGS,
1058 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1064 for (i = 0; i < num_bufs; i++) {
1065 priv_data = (struct priv_op_data *)
1066 (ops_processed[i] + 1);
1067 if (out_of_space == 1 && oos_zlib_compress)
1068 data_size = OUT_OF_SPACE_BUF;
1071 strlen(test_bufs[priv_data->orig_idx]) + 1;
1073 rte_pktmbuf_append(uncomp_bufs[i], data_size);
1077 /* Build the decompression operations */
1078 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1081 "Decompress operations could not be allocated "
1082 "from the mempool\n");
1086 /* Source buffer is the compressed data from the previous operations */
1087 for (i = 0; i < num_bufs; i++) {
1088 ops[i]->m_src = ops_processed[i]->m_dst;
1089 ops[i]->m_dst = uncomp_bufs[i];
1090 ops[i]->src.offset = 0;
1092 * Set the length of the compressed data to the
1093 * number of bytes that were produced in the previous stage
1095 ops[i]->src.length = ops_processed[i]->produced;
1096 ops[i]->dst.offset = 0;
1097 if (state == RTE_COMP_OP_STATELESS) {
1098 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1101 "Stateful operations are not supported "
1102 "in these tests yet\n");
1105 ops[i]->input_chksum = 0;
1107 * Copy private data from previous operations,
1108 * to keep the pointer to the original buffer
1110 memcpy(ops[i] + 1, ops_processed[i] + 1,
1111 sizeof(struct priv_op_data));
1115 * Free the previous compress operations,
1116 * as they are not needed anymore
1118 rte_comp_op_bulk_free(ops_processed, num_bufs);
1120 /* Decompress data (either with Zlib API or compressdev API */
1121 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1122 for (i = 0; i < num_bufs; i++) {
1123 priv_data = (struct priv_op_data *)(ops[i] + 1);
1124 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1125 const struct rte_comp_xform *decompress_xform =
1126 decompress_xforms[xform_idx];
1128 ret = decompress_zlib(ops[i], decompress_xform);
1132 ops_processed[i] = ops[i];
1135 /* Create decompress private xform data */
1136 for (i = 0; i < num_xforms; i++) {
1137 ret = rte_compressdev_private_xform_create(0,
1138 (const struct rte_comp_xform *)decompress_xforms[i],
1142 "Decompression private xform "
1143 "could not be created\n");
1149 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1150 /* Attach shareable private xform data to ops */
1151 for (i = 0; i < num_bufs; i++) {
1152 priv_data = (struct priv_op_data *)(ops[i] + 1);
1153 uint16_t xform_idx = priv_data->orig_idx %
1155 ops[i]->private_xform = priv_xforms[xform_idx];
1158 /* Create rest of the private xforms for the other ops */
1159 for (i = num_xforms; i < num_bufs; i++) {
1160 ret = rte_compressdev_private_xform_create(0,
1161 decompress_xforms[i % num_xforms],
1165 "Decompression private xform "
1166 "could not be created\n");
1172 /* Attach non shareable private xform data to ops */
1173 for (i = 0; i < num_bufs; i++) {
1174 priv_data = (struct priv_op_data *)(ops[i] + 1);
1175 uint16_t xform_idx = priv_data->orig_idx;
1176 ops[i]->private_xform = priv_xforms[xform_idx];
1180 /* Enqueue and dequeue all operations */
1181 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1182 if (num_enqd < num_bufs) {
1184 "The operations could not be enqueued\n");
1191 * If retrying a dequeue call, wait for 10 ms to allow
1192 * enough time to the driver to process the operations
1194 if (deqd_retries != 0) {
1196 * Avoid infinite loop if not all the
1197 * operations get out of the device
1199 if (deqd_retries == MAX_DEQD_RETRIES) {
1201 "Not all operations could be "
1205 usleep(DEQUEUE_WAIT_TIME);
1207 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1208 &ops_processed[num_total_deqd], num_bufs);
1209 num_total_deqd += num_deqd;
1211 } while (num_total_deqd < num_enqd);
/* Debug-log per-buffer decompression stats. */
1216 for (i = 0; i < num_bufs; i++) {
1217 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1218 char engine[] = "zlib, (directly, no PMD)";
1219 if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1220 strlcpy(engine, "pmd", sizeof(engine));
1221 RTE_LOG(DEBUG, USER1,
1222 "Buffer %u decompressed by %s from %u to %u bytes\n",
1223 buf_idx[priv_data->orig_idx], engine,
1224 ops_processed[i]->consumed, ops_processed[i]->produced);
1229 * Check operation status and free source mbuf (destination mbuf and
1230 * compress operation information is still needed)
1232 for (i = 0; i < num_bufs; i++) {
1233 if (out_of_space && oos_zlib_compress) {
1234 if (ops_processed[i]->status !=
1235 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1236 ret_status = TEST_FAILED;
1238 "Operation without expected out of "
1239 "space status error\n");
1245 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1247 "Some operations were not successful\n");
1250 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1251 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1252 comp_bufs[priv_data->orig_idx] = NULL;
1255 if (out_of_space && oos_zlib_compress) {
1256 ret_status = TEST_SUCCESS;
1261 * Compare the original stream with the decompressed stream
1262 * (in size and the data)
1264 for (i = 0; i < num_bufs; i++) {
1265 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1266 const char *buf1 = test_bufs[priv_data->orig_idx];
/* Flatten possibly-chained output into one buffer for comparison. */
1268 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1269 if (contig_buf == NULL) {
1270 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1275 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1276 ops_processed[i]->produced, contig_buf);
1277 if (compare_buffers(buf1, strlen(buf1) + 1,
1278 buf2, ops_processed[i]->produced) < 0)
1281 /* Test checksums */
1282 if (compress_xforms[0]->compress.chksum !=
1283 RTE_COMP_CHECKSUM_NONE) {
1284 if (ops_processed[i]->output_chksum !=
1285 compress_checksum[i]) {
1286 RTE_LOG(ERR, USER1, "The checksums differ\n"
1287 "Compression Checksum: %" PRIu64 "\tDecompression "
1288 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1289 ops_processed[i]->output_chksum);
1294 rte_free(contig_buf);
1298 ret_status = TEST_SUCCESS;
1301 /* Free resources */
1302 for (i = 0; i < num_bufs; i++) {
1303 rte_pktmbuf_free(uncomp_bufs[i]);
1304 rte_pktmbuf_free(comp_bufs[i]);
1305 rte_comp_op_free(ops[i]);
1306 rte_comp_op_free(ops_processed[i]);
1308 for (i = 0; i < num_priv_xforms; i++) {
1309 if (priv_xforms[i] != NULL)
1310 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1312 rte_free(contig_buf);
/*
 * Stateless DEFLATE round trip using fixed Huffman codes.
 * For every reference buffer: compress with the PMD and verify the result
 * by decompressing with zlib, then compress with zlib and decompress with
 * the PMD. Skipped when the device lacks RTE_COMP_FF_HUFFMAN_FIXED.
 * NOTE(review): some original lines are not visible in this excerpt;
 * comments describe only the code shown here.
 */
1318 test_compressdev_deflate_stateless_fixed(void)
1320 struct comp_testsuite_params *ts_params = &testsuite_params;
1323 const struct rte_compressdev_capabilities *capab;
/* Query DEFLATE capabilities of device 0; skip test if fixed Huffman
 * is not advertised.
 */
1325 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1326 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1328 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
/* Private copy of the default compress xform, switched to fixed Huffman */
1331 struct rte_comp_xform *compress_xform =
1332 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1334 if (compress_xform == NULL) {
1336 "Compress xform could not be created\n");
1341 memcpy(compress_xform, ts_params->def_comp_xform,
1342 sizeof(struct rte_comp_xform));
1343 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1345 struct interim_data_params int_data = {
1350 &ts_params->def_decomp_xform,
1354 struct test_data_params test_data = {
1355 RTE_COMP_OP_STATELESS,
/* Exercise each reference buffer in both directions (PMD <-> zlib) */
1362 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1363 int_data.test_bufs = &compress_test_bufs[i];
1364 int_data.buf_idx = &i;
1366 /* Compress with compressdev, decompress with Zlib */
1367 test_data.zlib_dir = ZLIB_DECOMPRESS;
1368 ret = test_deflate_comp_decomp(&int_data, &test_data);
1372 /* Compress with Zlib, decompress with compressdev */
1373 test_data.zlib_dir = ZLIB_COMPRESS;
1374 ret = test_deflate_comp_decomp(&int_data, &test_data);
1382 rte_free(compress_xform);
/*
 * Stateless DEFLATE round trip using dynamic Huffman codes.
 * Same structure as the fixed-Huffman test: PMD-compress / zlib-verify,
 * then zlib-compress / PMD-decompress, for every reference buffer.
 * Skipped when the device lacks RTE_COMP_FF_HUFFMAN_DYNAMIC.
 * NOTE(review): compress_xform is allocated *before* the capability
 * check below; if the unsupported-feature path returns early (return
 * lines not visible in this excerpt), the allocation may leak —
 * confirm against the full source.
 */
1387 test_compressdev_deflate_stateless_dynamic(void)
1389 struct comp_testsuite_params *ts_params = &testsuite_params;
1392 struct rte_comp_xform *compress_xform =
1393 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1395 const struct rte_compressdev_capabilities *capab;
1397 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1398 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1400 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1403 if (compress_xform == NULL) {
1405 "Compress xform could not be created\n");
/* Copy the default xform and request dynamic Huffman encoding */
1410 memcpy(compress_xform, ts_params->def_comp_xform,
1411 sizeof(struct rte_comp_xform));
1412 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1414 struct interim_data_params int_data = {
1419 &ts_params->def_decomp_xform,
1423 struct test_data_params test_data = {
1424 RTE_COMP_OP_STATELESS,
/* Exercise each reference buffer in both directions (PMD <-> zlib) */
1431 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1432 int_data.test_bufs = &compress_test_bufs[i];
1433 int_data.buf_idx = &i;
1435 /* Compress with compressdev, decompress with Zlib */
1436 test_data.zlib_dir = ZLIB_DECOMPRESS;
1437 ret = test_deflate_comp_decomp(&int_data, &test_data);
1441 /* Compress with Zlib, decompress with compressdev */
1442 test_data.zlib_dir = ZLIB_COMPRESS;
1443 ret = test_deflate_comp_decomp(&int_data, &test_data);
1451 rte_free(compress_xform);
/*
 * Stateless DEFLATE test submitting all reference buffers as one burst
 * (multiple operations in flight at once), using the default xforms.
 * Runs once PMD-compress/zlib-verify and once zlib-compress/PMD-decompress.
 */
1456 test_compressdev_deflate_stateless_multi_op(void)
1458 struct comp_testsuite_params *ts_params = &testsuite_params;
1459 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1460 uint16_t buf_idx[num_bufs];
/* Index table mapping each op back to its source buffer */
1464 for (i = 0; i < num_bufs; i++)
1467 struct interim_data_params int_data = {
1471 &ts_params->def_comp_xform,
1472 &ts_params->def_decomp_xform,
1476 struct test_data_params test_data = {
1477 RTE_COMP_OP_STATELESS,
1484 /* Compress with compressdev, decompress with Zlib */
1485 test_data.zlib_dir = ZLIB_DECOMPRESS;
1486 ret = test_deflate_comp_decomp(&int_data, &test_data);
1490 /* Compress with Zlib, decompress with compressdev */
1491 test_data.zlib_dir = ZLIB_COMPRESS;
1492 ret = test_deflate_comp_decomp(&int_data, &test_data);
1496 return TEST_SUCCESS;
/*
 * Stateless DEFLATE test sweeping every compression level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX for each reference buffer.
 * Only the PMD-compress / zlib-decompress direction is exercised, since
 * the level only affects the compress side.
 */
1500 test_compressdev_deflate_stateless_multi_level(void)
1502 struct comp_testsuite_params *ts_params = &testsuite_params;
/* Mutable copy of the default compress xform so the level can vary */
1506 struct rte_comp_xform *compress_xform =
1507 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1509 if (compress_xform == NULL) {
1511 "Compress xform could not be created\n");
1516 memcpy(compress_xform, ts_params->def_comp_xform,
1517 sizeof(struct rte_comp_xform));
1519 struct interim_data_params int_data = {
1524 &ts_params->def_decomp_xform,
1528 struct test_data_params test_data = {
1529 RTE_COMP_OP_STATELESS,
1536 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1537 int_data.test_bufs = &compress_test_bufs[i];
1538 int_data.buf_idx = &i;
/* Inner sweep over every supported compression level */
1540 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1542 compress_xform->compress.level = level;
1543 /* Compress with compressdev, decompress with Zlib */
1544 test_data.zlib_dir = ZLIB_DECOMPRESS;
1545 ret = test_deflate_comp_decomp(&int_data, &test_data);
1554 rte_free(compress_xform);
/* Number of distinct xform pairs (and ops) used by the multi-xform test */
1558 #define NUM_XFORMS 3
/*
 * Stateless DEFLATE test using several compress/decompress xform pairs in
 * the same burst, each compress xform configured with a different level
 * (starting at RTE_COMP_LEVEL_MIN; increment not visible in this excerpt).
 * The same input buffer is used for every xform so only the xform varies.
 */
1560 test_compressdev_deflate_stateless_multi_xform(void)
1562 struct comp_testsuite_params *ts_params = &testsuite_params;
1563 uint16_t num_bufs = NUM_XFORMS;
1564 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1565 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1566 const char *test_buffers[NUM_XFORMS];
1568 unsigned int level = RTE_COMP_LEVEL_MIN;
1569 uint16_t buf_idx[num_bufs];
1572 /* Create multiple xforms with various levels */
1573 for (i = 0; i < NUM_XFORMS; i++) {
1574 compress_xforms[i] = rte_malloc(NULL,
1575 sizeof(struct rte_comp_xform), 0);
1576 if (compress_xforms[i] == NULL) {
1578 "Compress xform could not be created\n");
1583 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1584 sizeof(struct rte_comp_xform));
1585 compress_xforms[i]->compress.level = level;
/* Matching decompress xform for each compress xform */
1588 decompress_xforms[i] = rte_malloc(NULL,
1589 sizeof(struct rte_comp_xform), 0);
1590 if (decompress_xforms[i] == NULL) {
1592 "Decompress xform could not be created\n");
1597 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1598 sizeof(struct rte_comp_xform));
1601 for (i = 0; i < NUM_XFORMS; i++) {
1603 /* Use the same buffer in all sessions */
1604 test_buffers[i] = compress_test_bufs[0];
1607 struct interim_data_params int_data = {
1616 struct test_data_params test_data = {
1617 RTE_COMP_OP_STATELESS,
1624 /* Compress with compressdev, decompress with Zlib */
1625 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Release every xform allocated above, on both success and error paths */
1632 for (i = 0; i < NUM_XFORMS; i++) {
1633 rte_free(compress_xforms[i]);
1634 rte_free(decompress_xforms[i]);
/*
 * Stateless DEFLATE test over scatter-gather (chained-mbuf) buffers.
 * Baseline run uses chained input and output (requires
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT); when the device also supports
 * SGL-in/linear-out or linear-in/SGL-out, those mixed layouts are
 * exercised as well, each in both PMD<->zlib directions.
 */
1641 test_compressdev_deflate_stateless_sgl(void)
1643 struct comp_testsuite_params *ts_params = &testsuite_params;
1646 const struct rte_compressdev_capabilities *capab;
1648 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1649 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1651 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1654 struct interim_data_params int_data = {
1658 &ts_params->def_comp_xform,
1659 &ts_params->def_decomp_xform,
1663 struct test_data_params test_data = {
1664 RTE_COMP_OP_STATELESS,
1671 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1672 int_data.test_bufs = &compress_test_bufs[i];
1673 int_data.buf_idx = &i;
1675 /* Compress with compressdev, decompress with Zlib */
1676 test_data.zlib_dir = ZLIB_DECOMPRESS;
1677 ret = test_deflate_comp_decomp(&int_data, &test_data);
1681 /* Compress with Zlib, decompress with compressdev */
1682 test_data.zlib_dir = ZLIB_COMPRESS;
1683 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Optional variant: chained input, linear output */
1687 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1688 /* Compress with compressdev, decompress with Zlib */
1689 test_data.zlib_dir = ZLIB_DECOMPRESS;
1690 test_data.buff_type = SGL_TO_LB;
1691 ret = test_deflate_comp_decomp(&int_data, &test_data);
1695 /* Compress with Zlib, decompress with compressdev */
1696 test_data.zlib_dir = ZLIB_COMPRESS;
1697 test_data.buff_type = SGL_TO_LB;
1698 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Optional variant: linear input, chained output */
1703 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1704 /* Compress with compressdev, decompress with Zlib */
1705 test_data.zlib_dir = ZLIB_DECOMPRESS;
1706 test_data.buff_type = LB_TO_SGL;
1707 ret = test_deflate_comp_decomp(&int_data, &test_data);
1711 /* Compress with Zlib, decompress with compressdev */
1712 test_data.zlib_dir = ZLIB_COMPRESS;
1713 test_data.buff_type = LB_TO_SGL;
1714 ret = test_deflate_comp_decomp(&int_data, &test_data);
1720 return TEST_SUCCESS;
/*
 * Stateless DEFLATE test validating checksum generation.
 * Skipped unless the device supports at least one of CRC32, Adler32 or
 * the combined CRC32-Adler32 checksum. For each supported checksum type
 * the test both compares the PMD checksum against one generated by zlib
 * (ZLIB_COMPRESS direction) and runs a pure-PMD round trip (ZLIB_NONE)
 * where compress and decompress checksums must match.
 */
1724 test_compressdev_deflate_stateless_checksum(void)
1726 struct comp_testsuite_params *ts_params = &testsuite_params;
1729 const struct rte_compressdev_capabilities *capab;
1731 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1732 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1734 /* Check if driver supports any checksum */
1735 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1736 (capab->comp_feature_flags &
1737 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1738 (capab->comp_feature_flags &
1739 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
/* Mutable copies of the default xforms so the chksum field can vary */
1742 struct rte_comp_xform *compress_xform =
1743 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1744 if (compress_xform == NULL) {
1745 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1749 memcpy(compress_xform, ts_params->def_comp_xform,
1750 sizeof(struct rte_comp_xform));
1752 struct rte_comp_xform *decompress_xform =
1753 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1754 if (decompress_xform == NULL) {
1755 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1756 rte_free(compress_xform);
1760 memcpy(decompress_xform, ts_params->def_decomp_xform,
1761 sizeof(struct rte_comp_xform));
1763 struct interim_data_params int_data = {
1772 struct test_data_params test_data = {
1773 RTE_COMP_OP_STATELESS,
1780 /* Check if driver supports crc32 checksum and test */
1781 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1782 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1783 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1785 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1786 /* Compress with compressdev, decompress with Zlib */
1787 int_data.test_bufs = &compress_test_bufs[i];
1788 int_data.buf_idx = &i;
1790 /* Generate zlib checksum and test against selected
1791 * drivers decompression checksum
1793 test_data.zlib_dir = ZLIB_COMPRESS;
1794 ret = test_deflate_comp_decomp(&int_data, &test_data);
1798 /* Generate compression and decompression
1799 * checksum of selected driver
1801 test_data.zlib_dir = ZLIB_NONE;
1802 ret = test_deflate_comp_decomp(&int_data, &test_data);
1808 /* Check if driver supports adler32 checksum and test */
1809 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1810 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1811 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1813 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1814 int_data.test_bufs = &compress_test_bufs[i];
1815 int_data.buf_idx = &i;
1817 /* Generate zlib checksum and test against selected
1818 * drivers decompression checksum
1820 test_data.zlib_dir = ZLIB_COMPRESS;
1821 ret = test_deflate_comp_decomp(&int_data, &test_data);
1824 /* Generate compression and decompression
1825 * checksum of selected driver
1827 test_data.zlib_dir = ZLIB_NONE;
1828 ret = test_deflate_comp_decomp(&int_data, &test_data);
1834 /* Check if driver supports combined crc and adler checksum and test */
1835 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1836 compress_xform->compress.chksum =
1837 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1838 decompress_xform->decompress.chksum =
1839 RTE_COMP_CHECKSUM_CRC32_ADLER32;
/* Combined checksum has no zlib equivalent, so only the pure-PMD
 * (ZLIB_NONE) round trip is run here.
 */
1841 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1842 int_data.test_bufs = &compress_test_bufs[i];
1843 int_data.buf_idx = &i;
1845 /* Generate compression and decompression
1846 * checksum of selected driver
1848 test_data.zlib_dir = ZLIB_NONE;
1849 ret = test_deflate_comp_decomp(&int_data, &test_data);
1858 rte_free(compress_xform);
1859 rte_free(decompress_xform);
/*
 * Negative test: deliberately undersize the destination buffer and verify
 * the PMD reports RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED instead of
 * success (out_of_space flag set to 1 in test_data_params). Error logs
 * produced during this test are expected, as announced below.
 * NOTE(review): compress_xform is allocated here but int_data uses
 * ts_params->def_comp_xform; the local allocation appears unused except
 * for being freed at the end — confirm against the full source.
 */
1864 test_compressdev_out_of_space_buffer(void)
1866 struct comp_testsuite_params *ts_params = &testsuite_params;
1869 const struct rte_compressdev_capabilities *capab;
1871 RTE_LOG(ERR, USER1, "This is a negative test errors are expected\n");
1873 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1874 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1876 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1879 struct rte_comp_xform *compress_xform =
1880 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1882 if (compress_xform == NULL) {
1884 "Compress xform could not be created\n");
1889 struct interim_data_params int_data = {
1890 &compress_test_bufs[0],
1893 &ts_params->def_comp_xform,
1894 &ts_params->def_decomp_xform,
1898 struct test_data_params test_data = {
1899 RTE_COMP_OP_STATELESS,
1902 1, /* run out-of-space test */
1905 /* Compress with compressdev, decompress with Zlib */
1906 test_data.zlib_dir = ZLIB_DECOMPRESS;
1907 ret = test_deflate_comp_decomp(&int_data, &test_data);
1911 /* Compress with Zlib, decompress with compressdev */
1912 test_data.zlib_dir = ZLIB_COMPRESS;
1913 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Repeat the out-of-space scenario with chained mbufs when supported */
1917 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1918 /* Compress with compressdev, decompress with Zlib */
1919 test_data.zlib_dir = ZLIB_DECOMPRESS;
1920 test_data.buff_type = SGL_BOTH;
1921 ret = test_deflate_comp_decomp(&int_data, &test_data);
1925 /* Compress with Zlib, decompress with compressdev */
1926 test_data.zlib_dir = ZLIB_COMPRESS;
1927 test_data.buff_type = SGL_BOTH;
1928 ret = test_deflate_comp_decomp(&int_data, &test_data);
1936 rte_free(compress_xform);
/*
 * Stateless DEFLATE test on a large (BIG_DATA_TEST_SIZE) pseudo-random
 * buffer with dynamic Huffman, forcing multi-segment (SGL) mbufs.
 * Requires both RTE_COMP_FF_HUFFMAN_DYNAMIC and
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT; skipped otherwise.
 * Temporarily switches the shared default compress xform to dynamic
 * Huffman and restores it to DEFAULT before returning.
 */
1941 test_compressdev_deflate_stateless_dynamic_big(void)
1943 struct comp_testsuite_params *ts_params = &testsuite_params;
1947 const struct rte_compressdev_capabilities *capab;
1948 char *test_buffer = NULL;
1950 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1951 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1953 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1956 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1959 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0)
1960 if (test_buffer == NULL) {
1962 "Can't allocate buffer for big-data\n");
1966 struct interim_data_params int_data = {
1967 (const char * const *)&test_buffer,
1970 &ts_params->def_comp_xform,
1971 &ts_params->def_decomp_xform,
1975 struct test_data_params test_data = {
1976 RTE_COMP_OP_STATELESS,
1983 ts_params->def_comp_xform->compress.deflate.huffman =
1984 RTE_COMP_HUFFMAN_DYNAMIC;
1986 /* fill the buffer with data based on rand. data */
/* Fixed seed keeps the pseudo-random content reproducible across runs.
 * (rand() % ((uint8_t)-1)) | 1 yields values in 1..255 (never 0), so no
 * embedded NUL byte appears before the final terminator below.
 */
1987 srand(BIG_DATA_TEST_SIZE);
1988 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
1989 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
1990 test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
1992 /* Compress with compressdev, decompress with Zlib */
1993 test_data.zlib_dir = ZLIB_DECOMPRESS;
1994 ret = test_deflate_comp_decomp(&int_data, &test_data);
1998 /* Compress with Zlib, decompress with compressdev */
1999 test_data.zlib_dir = ZLIB_COMPRESS;
2000 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Restore the shared default xform for subsequent tests */
2007 ts_params->def_comp_xform->compress.deflate.huffman =
2008 RTE_COMP_HUFFMAN_DEFAULT;
2009 rte_free(test_buffer);
/*
 * Test-suite descriptor: global setup/teardown plus one entry per test
 * case. All deflate cases share the generic per-test setup/teardown;
 * the invalid-configuration case runs without per-test fixtures.
 */
2014 static struct unit_test_suite compressdev_testsuite = {
2015 .suite_name = "compressdev unit test suite",
2016 .setup = testsuite_setup,
2017 .teardown = testsuite_teardown,
2018 .unit_test_cases = {
2019 TEST_CASE_ST(NULL, NULL,
2020 test_compressdev_invalid_configuration),
2021 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2022 test_compressdev_deflate_stateless_fixed),
2023 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2024 test_compressdev_deflate_stateless_dynamic),
2025 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2026 test_compressdev_deflate_stateless_dynamic_big),
2027 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2028 test_compressdev_deflate_stateless_multi_op),
2029 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2030 test_compressdev_deflate_stateless_multi_level),
2031 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2032 test_compressdev_deflate_stateless_multi_xform),
2033 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2034 test_compressdev_deflate_stateless_sgl),
2035 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2036 test_compressdev_deflate_stateless_checksum),
2037 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2038 test_compressdev_out_of_space_buffer),
2039 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point invoked by the DPDK test app: run the whole suite above. */
2044 test_compressdev(void)
2046 return unit_test_suite_runner(&compressdev_testsuite);
/* Expose the suite as the "compressdev_autotest" test command */
2049 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);