1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
17 #include "test_compressdev_test_buffer.h"
20 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
28 * 30% extra size for compressed data compared to original data,
29 * in case data size cannot be reduced and it is actually bigger
30 * due to the compress block headers
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
34 #define SMALL_SEG_SIZE 256
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
47 #define OUT_OF_SPACE_BUF 1
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
/* Human-readable names for enum rte_comp_huffman values, indexed by the
 * enum constants; used in the debug log of test_deflate_comp_decomp().
 */
55 huffman_type_strings[] = {
56 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
57 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
58 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Buffer layout variants used by the tests: LB = linear buffer (single
 * mbuf), SGL = scatter-gather list (chained mbufs).
 */
69 LB_BOTH = 0, /* both input and output are linear*/
70 SGL_BOTH, /* both input and output are chained */
71 SGL_TO_LB, /* input buffer is chained */
72 LB_TO_SGL /* output buffer is chained */
/* Suite-wide shared state: mbuf/op mempools and the default
 * compress/decompress xforms. Created once in testsuite_setup() and
 * released in testsuite_teardown().
 */
79 struct comp_testsuite_params {
80 struct rte_mempool *large_mbuf_pool; /* linear bufs sized for largest test buffer */
81 struct rte_mempool *small_mbuf_pool; /* SMALL_SEG_SIZE segments for SGL tests */
82 struct rte_mempool *big_mbuf_pool; /* MAX_MBUF_SEGMENT_SIZE bufs for big-data tests */
83 struct rte_mempool *op_pool; /* rte_comp_op pool with priv_op_data per op */
84 struct rte_comp_xform *def_comp_xform; /* default DEFLATE compress xform */
85 struct rte_comp_xform *def_decomp_xform; /* default DEFLATE decompress xform */
/* Per-test input set: the text buffers to round-trip and the xforms to
 * apply to them (xform i % num_xforms is used for buffer i).
 */
88 struct interim_data_params {
89 const char * const *test_bufs;
90 unsigned int num_bufs;
92 struct rte_comp_xform **compress_xforms;
93 struct rte_comp_xform **decompress_xforms;
94 unsigned int num_xforms;
/* Per-run knobs for test_deflate_comp_decomp(): operation type, buffer
 * layout, which side(s) are performed with zlib directly, and flags for
 * the deliberate out-of-space and big-data scenarios.
 */
97 struct test_data_params {
98 enum rte_comp_op_type state;
99 enum varied_buff buff_type;
100 enum zlib_direction zlib_dir;
101 unsigned int out_of_space; /* non-zero: destination made deliberately too small */
102 unsigned int big_data; /* non-zero: use big_mbuf_pool / multi-mbuf payloads */
/* Zero-initialized suite state; populated by testsuite_setup() */
105 static struct comp_testsuite_params testsuite_params = { 0 };
/* Release every pool/xform created in testsuite_setup(). Logs an error
 * (but still frees) if any mbufs/ops are still outstanding, which would
 * indicate a leak in one of the tests.
 */
108 testsuite_teardown(void)
110 struct comp_testsuite_params *ts_params = &testsuite_params;
112 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
113 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
114 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
115 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
116 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
117 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
118 if (rte_mempool_in_use_count(ts_params->op_pool))
119 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() tolerate NULL, so this is safe even if
 * setup failed part-way through.
 */
121 rte_mempool_free(ts_params->large_mbuf_pool);
122 rte_mempool_free(ts_params->small_mbuf_pool);
123 rte_mempool_free(ts_params->big_mbuf_pool);
124 rte_mempool_free(ts_params->op_pool);
125 rte_free(ts_params->def_comp_xform);
126 rte_free(ts_params->def_decomp_xform);
/* One-time suite setup: requires at least one compressdev; creates the
 * three mbuf pools (linear, small-segment SGL, big), the comp-op pool,
 * and the default compress/decompress xforms. On any failure the
 * error path (elided here) calls testsuite_teardown().
 */
130 testsuite_setup(void)
132 struct comp_testsuite_params *ts_params = &testsuite_params;
133 uint32_t max_buf_size = 0;
136 if (rte_compressdev_count() == 0) {
137 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
141 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
142 rte_compressdev_name_get(0));
/* Size linear mbufs to fit the largest test string (incl. NUL) */
144 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
145 max_buf_size = RTE_MAX(max_buf_size,
146 strlen(compress_test_bufs[i]) + 1);
149 * Buffers to be used in compression and decompression.
150 * Since decompressed data might be larger than
151 * compressed data (due to block header),
152 * buffers should be big enough for both cases.
154 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
155 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
158 max_buf_size + RTE_PKTMBUF_HEADROOM,
160 if (ts_params->large_mbuf_pool == NULL) {
161 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
165 /* Create mempool with smaller buffers for SGL testing */
166 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
167 NUM_LARGE_MBUFS * MAX_SEGS,
169 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
171 if (ts_params->small_mbuf_pool == NULL) {
172 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
176 /* Create mempool with big buffers for SGL testing */
177 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
180 MAX_MBUF_SEGMENT_SIZE,
182 if (ts_params->big_mbuf_pool == NULL) {
183 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* Each op carries a struct priv_op_data (original buffer index) */
187 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
188 0, sizeof(struct priv_op_data),
190 if (ts_params->op_pool == NULL) {
191 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
195 ts_params->def_comp_xform =
196 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
197 if (ts_params->def_comp_xform == NULL) {
199 "Default compress xform could not be created\n");
202 ts_params->def_decomp_xform =
203 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
204 if (ts_params->def_decomp_xform == NULL) {
206 "Default decompress xform could not be created\n");
210 /* Initializes default values for compress/decompress xforms */
211 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): the two ".algo = ..." lines below end with ',' (comma
 * operator) rather than ';'. Behavior is unchanged because the next
 * assignment becomes the comma expression's right operand, but it
 * should be a ';' for clarity.
 */
212 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
213 ts_params->def_comp_xform->compress.deflate.huffman =
214 RTE_COMP_HUFFMAN_DEFAULT;
215 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
216 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
217 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
219 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
220 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
221 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
222 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: undo partial setup */
227 testsuite_teardown();
/* Per-test setup: configure compressdev 0 with one queue pair
 * (NUM_MAX_XFORMS private xforms), set up the queue pair, and start
 * the device.
 */
233 generic_ut_setup(void)
235 /* Configure compressdev (one device, one queue pair) */
236 struct rte_compressdev_config config = {
237 .socket_id = rte_socket_id(),
239 .max_nb_priv_xforms = NUM_MAX_XFORMS,
243 if (rte_compressdev_configure(0, &config) < 0) {
244 RTE_LOG(ERR, USER1, "Device configuration failed\n");
248 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
249 rte_socket_id()) < 0) {
250 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
254 if (rte_compressdev_start(0) < 0) {
255 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close compressdev 0 */
263 generic_ut_teardown(void)
265 rte_compressdev_stop(0);
266 if (rte_compressdev_close(0) < 0)
267 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/* Negative test: device configuration must fail with 0 queue pairs and
 * with more queue pairs than the device reports supporting; queue-pair
 * setup must fail before any queue pairs have been configured.
 */
271 test_compressdev_invalid_configuration(void)
273 struct rte_compressdev_config invalid_config;
274 struct rte_compressdev_config valid_config = {
275 .socket_id = rte_socket_id(),
277 .max_nb_priv_xforms = NUM_MAX_XFORMS,
280 struct rte_compressdev_info dev_info;
282 /* Invalid configuration with 0 queue pairs */
283 memcpy(&invalid_config, &valid_config,
284 sizeof(struct rte_compressdev_config));
285 invalid_config.nb_queue_pairs = 0;
287 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
288 "Device configuration was successful "
289 "with no queue pairs (invalid)\n");
292 * Invalid configuration with too many queue pairs
293 * (if there is an actual maximum number of queue pairs)
295 rte_compressdev_info_get(0, &dev_info);
/* max_nb_queue_pairs == 0 means "no limit", so skip in that case */
296 if (dev_info.max_nb_queue_pairs != 0) {
297 memcpy(&invalid_config, &valid_config,
298 sizeof(struct rte_compressdev_config));
299 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
301 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
302 "Device configuration was successful "
303 "with too many queue pairs (invalid)\n");
306 /* Invalid queue pair setup, with no number of queue pairs set */
307 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
308 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
309 "Queue pair setup was successful "
310 "with no queue pairs set (invalid)\n");
/* Compare two buffers by length and then content; logs and takes the
 * failure path (elided) on any mismatch.
 */
316 compare_buffers(const char *buffer1, uint32_t buffer1_len,
317 const char *buffer2, uint32_t buffer2_len)
319 if (buffer1_len != buffer2_len) {
320 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
324 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
325 RTE_LOG(ERR, USER1, "Buffers are different\n");
333 * Maps compressdev and Zlib flush flags
/* Translate an rte_comp_flush_flag into the corresponding zlib flush
 * constant (return statements elided in this view); the default branch
 * is documented below as unreachable for valid inputs.
 */
336 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
339 case RTE_COMP_FLUSH_NONE:
341 case RTE_COMP_FLUSH_SYNC:
343 case RTE_COMP_FLUSH_FULL:
345 case RTE_COMP_FLUSH_FINAL:
348 * There should be only the values above,
349 * so this should never happen
/* Reference compressor: run op->m_src through zlib's deflate directly
 * (bypassing the PMD) into op->m_dst, honoring the xform's huffman
 * type, checksum and window size. On success fills op->consumed,
 * op->produced, op->status and op->output_chksum. Chained (SGL) mbufs
 * are flattened into temporary contiguous buffers first.
 */
357 compress_zlib(struct rte_comp_op *op,
358 const struct rte_comp_xform *xform, int mem_level)
362 int strategy, window_bits, comp_level;
363 int ret = TEST_FAILED;
364 uint8_t *single_src_buf = NULL;
365 uint8_t *single_dst_buf = NULL;
367 /* initialize zlib stream */
368 stream.zalloc = Z_NULL;
369 stream.zfree = Z_NULL;
370 stream.opaque = Z_NULL;
372 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
375 strategy = Z_DEFAULT_STRATEGY;
378 * Window bits is the base two logarithm of the window size (in bytes).
379 * When doing raw DEFLATE, this number will be negative.
381 window_bits = -(xform->compress.window_size);
/* Checksum selection: ADLER32 -> zlib wrapper, CRC32 -> special
 * window-bits value (header/trailer are stripped again further down).
 */
382 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
384 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
385 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
387 comp_level = xform->compress.level;
389 if (comp_level != RTE_COMP_LEVEL_NONE)
390 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
391 window_bits, mem_level, strategy);
393 ret = deflateInit(&stream, Z_NO_COMPRESSION);
396 printf("Zlib deflate could not be initialized\n");
400 /* Assuming stateless operation */
/* Chained source: flatten into one contiguous temporary buffer */
402 if (op->m_src->nb_segs > 1) {
403 single_src_buf = rte_malloc(NULL,
404 rte_pktmbuf_pkt_len(op->m_src), 0);
405 if (single_src_buf == NULL) {
406 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
410 if (rte_pktmbuf_read(op->m_src, op->src.offset,
411 rte_pktmbuf_pkt_len(op->m_src) -
413 single_src_buf) == NULL) {
415 "Buffer could not be read entirely\n");
419 stream.avail_in = op->src.length;
420 stream.next_in = single_src_buf;
423 stream.avail_in = op->src.length;
424 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: deflate into a temporary contiguous buffer,
 * scattered back into the mbuf chain after compression below.
 */
428 if (op->m_dst->nb_segs > 1) {
430 single_dst_buf = rte_malloc(NULL,
431 rte_pktmbuf_pkt_len(op->m_dst), 0);
432 if (single_dst_buf == NULL) {
434 "Buffer could not be allocated\n");
438 stream.avail_out = op->m_dst->pkt_len;
439 stream.next_out = single_dst_buf;
441 } else {/* linear output */
442 stream.avail_out = op->m_dst->data_len;
443 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
447 /* Stateless operation, all buffer will be compressed in one go */
448 zlib_flush = map_zlib_flush_flag(op->flush_flag);
449 ret = deflate(&stream, zlib_flush);
451 if (stream.avail_in != 0) {
452 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
456 if (ret != Z_STREAM_END)
459 /* Copy data to destination SGL */
460 if (op->m_dst->nb_segs > 1) {
461 uint32_t remaining_data = stream.total_out;
462 uint8_t *src_data = single_dst_buf;
463 struct rte_mbuf *dst_buf = op->m_dst;
465 while (remaining_data > 0) {
466 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
467 uint8_t *, op->dst.offset);
/* Last (partial) segment gets the tail of the data */
469 if (remaining_data < dst_buf->data_len) {
470 memcpy(dst_data, src_data, remaining_data);
473 memcpy(dst_data, src_data, dst_buf->data_len);
474 remaining_data -= dst_buf->data_len;
475 src_data += dst_buf->data_len;
476 dst_buf = dst_buf->next;
481 op->consumed = stream.total_in;
/* Strip the zlib/gzip wrapper so m_dst holds only the raw DEFLATE
 * payload; produced excludes the header and trailer bytes.
 */
482 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
483 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
484 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
485 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
487 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
488 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
489 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
490 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
493 op->produced = stream.total_out;
495 op->status = RTE_COMP_OP_STATUS_SUCCESS;
496 op->output_chksum = stream.adler;
498 deflateReset(&stream);
/* Cleanup path: rte_free() tolerates NULL */
503 rte_free(single_src_buf);
504 rte_free(single_dst_buf);
/* Reference decompressor: run op->m_src through zlib's inflate directly
 * (bypassing the PMD) into op->m_dst, using raw DEFLATE (negative
 * window bits). Chained sources are flattened into temporaries first;
 * on success fills op->consumed, op->produced and op->status.
 */
510 decompress_zlib(struct rte_comp_op *op,
511 const struct rte_comp_xform *xform)
516 int ret = TEST_FAILED;
517 uint8_t *single_src_buf = NULL;
518 uint8_t *single_dst_buf = NULL;
520 /* initialize zlib stream */
521 stream.zalloc = Z_NULL;
522 stream.zfree = Z_NULL;
523 stream.opaque = Z_NULL;
526 * Window bits is the base two logarithm of the window size (in bytes).
527 * When doing raw DEFLATE, this number will be negative.
529 window_bits = -(xform->decompress.window_size);
530 ret = inflateInit2(&stream, window_bits);
533 printf("Zlib deflate could not be initialized\n");
537 /* Assuming stateless operation */
/* Chained source: flatten both src and dst into temporary contiguous
 * buffers; the result is scattered back into m_dst after inflation.
 */
539 if (op->m_src->nb_segs > 1) {
540 single_src_buf = rte_malloc(NULL,
541 rte_pktmbuf_pkt_len(op->m_src), 0);
542 if (single_src_buf == NULL) {
543 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
546 single_dst_buf = rte_malloc(NULL,
547 rte_pktmbuf_pkt_len(op->m_dst), 0);
548 if (single_dst_buf == NULL) {
549 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
552 if (rte_pktmbuf_read(op->m_src, 0,
553 rte_pktmbuf_pkt_len(op->m_src),
554 single_src_buf) == NULL) {
556 "Buffer could not be read entirely\n");
560 stream.avail_in = op->src.length;
561 stream.next_in = single_src_buf;
562 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
563 stream.next_out = single_dst_buf;
566 stream.avail_in = op->src.length;
567 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
568 stream.avail_out = op->m_dst->data_len;
569 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
572 /* Stateless operation, all buffer will be compressed in one go */
573 zlib_flush = map_zlib_flush_flag(op->flush_flag);
574 ret = inflate(&stream, zlib_flush);
576 if (stream.avail_in != 0) {
577 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
581 if (ret != Z_STREAM_END)
/* Scatter the inflated data back into the destination mbuf chain;
 * gated on m_src->nb_segs to match the allocation branch above, which
 * only creates single_dst_buf when the source was chained.
 */
584 if (op->m_src->nb_segs > 1) {
585 uint32_t remaining_data = stream.total_out;
586 uint8_t *src_data = single_dst_buf;
587 struct rte_mbuf *dst_buf = op->m_dst;
589 while (remaining_data > 0) {
590 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
593 if (remaining_data < dst_buf->data_len) {
594 memcpy(dst_data, src_data, remaining_data);
597 memcpy(dst_data, src_data, dst_buf->data_len);
598 remaining_data -= dst_buf->data_len;
599 src_data += dst_buf->data_len;
600 dst_buf = dst_buf->next;
605 op->consumed = stream.total_in;
606 op->produced = stream.total_out;
607 op->status = RTE_COMP_OP_STATUS_SUCCESS;
609 inflateReset(&stream);
/* Build a scatter-gather mbuf chain on head_buf holding total_data_size
 * bytes split into seg_size segments. If test_buf is non-NULL its
 * contents are copied in (without NUL terminator); otherwise only space
 * is reserved. Extra segments come from small_mbuf_pool, except a
 * large final segment which comes from large_mbuf_pool.
 */
619 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
620 uint32_t total_data_size,
621 struct rte_mempool *small_mbuf_pool,
622 struct rte_mempool *large_mbuf_pool,
623 uint8_t limit_segs_in_sgl,
626 uint32_t remaining_data = total_data_size;
627 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
628 struct rte_mempool *pool;
629 struct rte_mbuf *next_seg;
632 const char *data_ptr = test_buf;
/* Clamp the segment count when a limit is requested (0 = unlimited);
 * the last segment then absorbs all remaining data.
 * NOTE(review): the "- 1" together with the header-segment decrement
 * below yields limit-1 total segments — confirm the off-by-one is
 * intended.
 */
636 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
637 num_remaining_segs = limit_segs_in_sgl - 1;
640 * Allocate data in the first segment (header) and
641 * copy data if test buffer is provided
643 if (remaining_data < seg_size)
644 data_size = remaining_data;
646 data_size = seg_size;
647 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
648 if (buf_ptr == NULL) {
650 "Not enough space in the 1st buffer\n");
654 if (data_ptr != NULL) {
655 /* Copy characters without NULL terminator */
656 strncpy(buf_ptr, data_ptr, data_size);
657 data_ptr += data_size;
659 remaining_data -= data_size;
660 num_remaining_segs--;
663 * Allocate the rest of the segments,
664 * copy the rest of the data and chain the segments.
666 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment: take whatever data is left, choosing the pool by
 * whether the leftover still exceeds one small segment.
 */
668 if (i == (num_remaining_segs - 1)) {
670 if (remaining_data > seg_size)
671 pool = large_mbuf_pool;
673 pool = small_mbuf_pool;
674 data_size = remaining_data;
676 data_size = seg_size;
677 pool = small_mbuf_pool;
680 next_seg = rte_pktmbuf_alloc(pool);
681 if (next_seg == NULL) {
683 "New segment could not be allocated "
684 "from the mempool\n");
687 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
688 if (buf_ptr == NULL) {
690 "Not enough space in the buffer\n");
691 rte_pktmbuf_free(next_seg);
694 if (data_ptr != NULL) {
695 /* Copy characters without NULL terminator */
696 strncpy(buf_ptr, data_ptr, data_size);
697 data_ptr += data_size;
699 remaining_data -= data_size;
701 ret = rte_pktmbuf_chain(head_buf, next_seg);
703 rte_pktmbuf_free(next_seg);
705 "Segment could not chained\n");
714 * Compresses and decompresses buffer with compressdev API and Zlib API
/* Core test driver: compresses each test buffer and then decompresses
 * it back, with either side optionally done by zlib directly (per
 * zlib_dir), then verifies the round-trip output and checksums match
 * the original data. Supports linear and SGL buffers, out-of-space
 * negative tests, and big-data payloads per test_data.
 */
717 test_deflate_comp_decomp(const struct interim_data_params *int_data,
718 const struct test_data_params *test_data)
720 struct comp_testsuite_params *ts_params = &testsuite_params;
721 const char * const *test_bufs = int_data->test_bufs;
722 unsigned int num_bufs = int_data->num_bufs;
723 uint16_t *buf_idx = int_data->buf_idx;
724 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
725 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
726 unsigned int num_xforms = int_data->num_xforms;
727 enum rte_comp_op_type state = test_data->state;
728 unsigned int buff_type = test_data->buff_type;
729 unsigned int out_of_space = test_data->out_of_space;
730 unsigned int big_data = test_data->big_data;
731 enum zlib_direction zlib_dir = test_data->zlib_dir;
734 struct rte_mbuf *uncomp_bufs[num_bufs];
735 struct rte_mbuf *comp_bufs[num_bufs];
736 struct rte_comp_op *ops[num_bufs];
737 struct rte_comp_op *ops_processed[num_bufs];
738 void *priv_xforms[num_bufs];
739 uint16_t num_enqd, num_deqd, num_total_deqd;
740 uint16_t num_priv_xforms = 0;
741 unsigned int deqd_retries = 0;
742 struct priv_op_data *priv_data;
745 struct rte_mempool *buf_pool;
747 /* Compressing with CompressDev */
748 unsigned int oos_zlib_decompress =
749 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
750 /* Decompressing with CompressDev */
751 unsigned int oos_zlib_compress =
752 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
753 const struct rte_compressdev_capabilities *capa =
754 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
755 char *contig_buf = NULL;
756 uint64_t compress_checksum[num_bufs];
758 /* Initialize all arrays to NULL */
759 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
760 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
761 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
762 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
763 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* Choose the mbuf pool by payload size and buffer layout */
766 buf_pool = ts_params->big_mbuf_pool;
767 else if (buff_type == SGL_BOTH)
768 buf_pool = ts_params->small_mbuf_pool;
770 buf_pool = ts_params->large_mbuf_pool;
772 /* Prepare the source mbufs with the data */
773 ret = rte_pktmbuf_alloc_bulk(buf_pool,
774 uncomp_bufs, num_bufs);
777 "Source mbufs could not be allocated "
778 "from the mempool\n");
782 if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
783 for (i = 0; i < num_bufs; i++) {
784 data_size = strlen(test_bufs[i]) + 1;
785 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
787 big_data ? buf_pool : ts_params->small_mbuf_pool,
788 big_data ? buf_pool : ts_params->large_mbuf_pool,
789 big_data ? 0 : MAX_SEGS,
790 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
794 for (i = 0; i < num_bufs; i++) {
795 data_size = strlen(test_bufs[i]) + 1;
796 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
797 strlcpy(buf_ptr, test_bufs[i], data_size);
801 /* Prepare the destination mbufs */
802 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
805 "Destination mbufs could not be allocated "
806 "from the mempool\n");
810 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
811 for (i = 0; i < num_bufs; i++) {
/* Out-of-space test: make the destination deliberately tiny */
812 if (out_of_space == 1 && oos_zlib_decompress)
813 data_size = OUT_OF_SPACE_BUF;
815 (data_size = strlen(test_bufs[i]) *
816 COMPRESS_BUF_SIZE_RATIO);
818 if (prepare_sgl_bufs(NULL, comp_bufs[i],
820 big_data ? buf_pool : ts_params->small_mbuf_pool,
821 big_data ? buf_pool : ts_params->large_mbuf_pool,
822 big_data ? 0 : MAX_SEGS,
823 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
829 for (i = 0; i < num_bufs; i++) {
830 if (out_of_space == 1 && oos_zlib_decompress)
831 data_size = OUT_OF_SPACE_BUF;
833 (data_size = strlen(test_bufs[i]) *
834 COMPRESS_BUF_SIZE_RATIO);
836 rte_pktmbuf_append(comp_bufs[i], data_size);
840 /* Build the compression operations */
841 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
844 "Compress operations could not be allocated "
845 "from the mempool\n");
850 for (i = 0; i < num_bufs; i++) {
851 ops[i]->m_src = uncomp_bufs[i];
852 ops[i]->m_dst = comp_bufs[i];
853 ops[i]->src.offset = 0;
854 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
855 ops[i]->dst.offset = 0;
856 if (state == RTE_COMP_OP_STATELESS) {
857 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
860 "Stateful operations are not supported "
861 "in these tests yet\n");
864 ops[i]->input_chksum = 0;
866 * Store original operation index in private data,
867 * since ordering does not have to be maintained,
868 * when dequeueing from compressdev, so a comparison
869 * at the end of the test can be done.
871 priv_data = (struct priv_op_data *) (ops[i] + 1);
872 priv_data->orig_idx = i;
875 /* Compress data (either with Zlib API or compressdev API */
876 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
877 for (i = 0; i < num_bufs; i++) {
878 const struct rte_comp_xform *compress_xform =
879 compress_xforms[i % num_xforms];
880 ret = compress_zlib(ops[i], compress_xform,
885 ops_processed[i] = ops[i];
888 /* Create compress private xform data */
889 for (i = 0; i < num_xforms; i++) {
890 ret = rte_compressdev_private_xform_create(0,
891 (const struct rte_comp_xform *)compress_xforms[i],
895 "Compression private xform "
896 "could not be created\n");
902 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
903 /* Attach shareable private xform data to ops */
904 for (i = 0; i < num_bufs; i++)
905 ops[i]->private_xform = priv_xforms[i % num_xforms];
907 /* Create rest of the private xforms for the other ops */
908 for (i = num_xforms; i < num_bufs; i++) {
909 ret = rte_compressdev_private_xform_create(0,
910 compress_xforms[i % num_xforms],
914 "Compression private xform "
915 "could not be created\n");
921 /* Attach non shareable private xform data to ops */
922 for (i = 0; i < num_bufs; i++)
923 ops[i]->private_xform = priv_xforms[i];
926 /* Enqueue and dequeue all operations */
927 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
928 if (num_enqd < num_bufs) {
930 "The operations could not be enqueued\n");
937 * If retrying a dequeue call, wait for 10 ms to allow
938 * enough time to the driver to process the operations
940 if (deqd_retries != 0) {
942 * Avoid infinite loop if not all the
943 * operations get out of the device
945 if (deqd_retries == MAX_DEQD_RETRIES) {
947 "Not all operations could be "
951 usleep(DEQUEUE_WAIT_TIME);
953 num_deqd = rte_compressdev_dequeue_burst(0, 0,
954 &ops_processed[num_total_deqd], num_bufs);
955 num_total_deqd += num_deqd;
958 } while (num_total_deqd < num_enqd);
962 /* Free compress private xforms */
963 for (i = 0; i < num_priv_xforms; i++) {
964 rte_compressdev_private_xform_free(0, priv_xforms[i]);
965 priv_xforms[i] = NULL;
970 for (i = 0; i < num_bufs; i++) {
971 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
972 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
973 const struct rte_comp_compress_xform *compress_xform =
974 &compress_xforms[xform_idx]->compress;
975 enum rte_comp_huffman huffman_type =
976 compress_xform->deflate.huffman;
977 char engine[] = "zlib (directly, not PMD)";
/* NOTE(review): this condition is always true — a single value cannot
 * equal both ZLIB_COMPRESS and ZLIB_ALL, so "!= A || != B" holds for
 * every zlib_dir. Almost certainly "&&" was intended; as written the
 * debug log always reports "PMD" even when zlib did the compression.
 */
978 if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
979 strlcpy(engine, "PMD", sizeof(engine));
981 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
982 " %u bytes (level = %d, huffman = %s)\n",
983 buf_idx[priv_data->orig_idx], engine,
984 ops_processed[i]->consumed, ops_processed[i]->produced,
985 compress_xform->level,
986 huffman_type_strings[huffman_type]);
987 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
988 ops_processed[i]->consumed == 0 ? 0 :
989 (float)ops_processed[i]->produced /
990 ops_processed[i]->consumed * 100);
991 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
992 compress_checksum[i] = ops_processed[i]->output_chksum;
997 * Check operation status and free source mbufs (destination mbuf and
998 * compress operation information is needed for the decompression stage)
1000 for (i = 0; i < num_bufs; i++) {
1001 if (out_of_space && oos_zlib_decompress) {
1002 if (ops_processed[i]->status !=
1003 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1007 "Operation without expected out of "
1008 "space status error\n");
1014 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1016 "Some operations were not successful\n");
1019 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1020 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1021 uncomp_bufs[priv_data->orig_idx] = NULL;
/* Out-of-space compression test ends here (success path elided) */
1024 if (out_of_space && oos_zlib_decompress) {
1029 /* Allocate buffers for decompressed data */
1030 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1033 "Destination mbufs could not be allocated "
1034 "from the mempool\n");
1038 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1039 for (i = 0; i < num_bufs; i++) {
1040 priv_data = (struct priv_op_data *)
1041 (ops_processed[i] + 1);
1042 if (out_of_space == 1 && oos_zlib_compress)
1043 data_size = OUT_OF_SPACE_BUF;
1046 strlen(test_bufs[priv_data->orig_idx]) + 1;
1048 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1050 big_data ? buf_pool : ts_params->small_mbuf_pool,
1051 big_data ? buf_pool : ts_params->large_mbuf_pool,
1052 big_data ? 0 : MAX_SEGS,
1053 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1059 for (i = 0; i < num_bufs; i++) {
1060 priv_data = (struct priv_op_data *)
1061 (ops_processed[i] + 1);
1062 if (out_of_space == 1 && oos_zlib_compress)
1063 data_size = OUT_OF_SPACE_BUF;
1066 strlen(test_bufs[priv_data->orig_idx]) + 1;
1068 rte_pktmbuf_append(uncomp_bufs[i], data_size);
1072 /* Build the decompression operations */
1073 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1076 "Decompress operations could not be allocated "
1077 "from the mempool\n");
1081 /* Source buffer is the compressed data from the previous operations */
1082 for (i = 0; i < num_bufs; i++) {
1083 ops[i]->m_src = ops_processed[i]->m_dst;
1084 ops[i]->m_dst = uncomp_bufs[i];
1085 ops[i]->src.offset = 0;
1087 * Set the length of the compressed data to the
1088 * number of bytes that were produced in the previous stage
1090 ops[i]->src.length = ops_processed[i]->produced;
1091 ops[i]->dst.offset = 0;
1092 if (state == RTE_COMP_OP_STATELESS) {
1093 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1096 "Stateful operations are not supported "
1097 "in these tests yet\n");
1100 ops[i]->input_chksum = 0;
1102 * Copy private data from previous operations,
1103 * to keep the pointer to the original buffer
1105 memcpy(ops[i] + 1, ops_processed[i] + 1,
1106 sizeof(struct priv_op_data));
1110 * Free the previous compress operations,
1111 * as they are not needed anymore
1113 rte_comp_op_bulk_free(ops_processed, num_bufs);
1115 /* Decompress data (either with Zlib API or compressdev API */
1116 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1117 for (i = 0; i < num_bufs; i++) {
1118 priv_data = (struct priv_op_data *)(ops[i] + 1);
1119 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1120 const struct rte_comp_xform *decompress_xform =
1121 decompress_xforms[xform_idx];
1123 ret = decompress_zlib(ops[i], decompress_xform);
1127 ops_processed[i] = ops[i];
1130 /* Create decompress private xform data */
1131 for (i = 0; i < num_xforms; i++) {
1132 ret = rte_compressdev_private_xform_create(0,
1133 (const struct rte_comp_xform *)decompress_xforms[i],
1137 "Decompression private xform "
1138 "could not be created\n");
1144 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1145 /* Attach shareable private xform data to ops */
1146 for (i = 0; i < num_bufs; i++) {
1147 priv_data = (struct priv_op_data *)(ops[i] + 1);
1148 uint16_t xform_idx = priv_data->orig_idx %
1150 ops[i]->private_xform = priv_xforms[xform_idx];
1153 /* Create rest of the private xforms for the other ops */
1154 for (i = num_xforms; i < num_bufs; i++) {
1155 ret = rte_compressdev_private_xform_create(0,
1156 decompress_xforms[i % num_xforms],
1160 "Decompression private xform "
1161 "could not be created\n");
1167 /* Attach non shareable private xform data to ops */
1168 for (i = 0; i < num_bufs; i++) {
1169 priv_data = (struct priv_op_data *)(ops[i] + 1);
1170 uint16_t xform_idx = priv_data->orig_idx;
1171 ops[i]->private_xform = priv_xforms[xform_idx];
1175 /* Enqueue and dequeue all operations */
1176 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1177 if (num_enqd < num_bufs) {
1179 "The operations could not be enqueued\n");
1186 * If retrying a dequeue call, wait for 10 ms to allow
1187 * enough time to the driver to process the operations
1189 if (deqd_retries != 0) {
1191 * Avoid infinite loop if not all the
1192 * operations get out of the device
1194 if (deqd_retries == MAX_DEQD_RETRIES) {
1196 "Not all operations could be "
1200 usleep(DEQUEUE_WAIT_TIME);
1202 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1203 &ops_processed[num_total_deqd], num_bufs);
1204 num_total_deqd += num_deqd;
1206 } while (num_total_deqd < num_enqd);
1211 for (i = 0; i < num_bufs; i++) {
1212 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1213 char engine[] = "zlib, (directly, no PMD)";
/* NOTE(review): same always-true condition as on the compression side
 * ("!= A || != B" with A != B) — likely meant "&&"; the engine label
 * is unconditionally overwritten with "pmd".
 */
1214 if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1215 strlcpy(engine, "pmd", sizeof(engine));
1216 RTE_LOG(DEBUG, USER1,
1217 "Buffer %u decompressed by %s from %u to %u bytes\n",
1218 buf_idx[priv_data->orig_idx], engine,
1219 ops_processed[i]->consumed, ops_processed[i]->produced);
1224 * Check operation status and free source mbuf (destination mbuf and
1225 * compress operation information is still needed)
1227 for (i = 0; i < num_bufs; i++) {
1228 if (out_of_space && oos_zlib_compress) {
1229 if (ops_processed[i]->status !=
1230 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1234 "Operation without expected out of "
1235 "space status error\n");
1241 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1243 "Some operations were not successful\n");
1246 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1247 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1248 comp_bufs[priv_data->orig_idx] = NULL;
1251 if (out_of_space && oos_zlib_compress) {
1257 * Compare the original stream with the decompressed stream
1258 * (in size and the data)
1260 for (i = 0; i < num_bufs; i++) {
1261 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1262 const char *buf1 = test_bufs[priv_data->orig_idx];
1264 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1265 if (contig_buf == NULL) {
1266 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
/* rte_pktmbuf_read() linearizes a possibly-chained m_dst into
 * contig_buf for the byte-level comparison.
 */
1271 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1272 ops_processed[i]->produced, contig_buf);
1273 if (compare_buffers(buf1, strlen(buf1) + 1,
1274 buf2, ops_processed[i]->produced) < 0)
1277 /* Test checksums */
1278 if (compress_xforms[0]->compress.chksum !=
1279 RTE_COMP_CHECKSUM_NONE) {
1280 if (ops_processed[i]->output_chksum !=
1281 compress_checksum[i]) {
1282 RTE_LOG(ERR, USER1, "The checksums differ\n"
1283 "Compression Checksum: %" PRIu64 "\tDecompression "
1284 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1285 ops_processed[i]->output_chksum);
1290 rte_free(contig_buf);
1297 /* Free resources */
1298 for (i = 0; i < num_bufs; i++) {
1299 rte_pktmbuf_free(uncomp_bufs[i]);
1300 rte_pktmbuf_free(comp_bufs[i]);
1301 rte_comp_op_free(ops[i]);
1302 rte_comp_op_free(ops_processed[i]);
1304 for (i = 0; i < num_priv_xforms; i++) {
1305 if (priv_xforms[i] != NULL)
1306 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1308 rte_free(contig_buf);
/*
 * Stateless DEFLATE test with fixed Huffman encoding.
 * For every reference buffer: compress with the PMD and decompress with
 * Zlib, then compress with Zlib and decompress with the PMD, verifying
 * the round trip in both directions.
 * Skipped when device 0 does not advertise RTE_COMP_FF_HUFFMAN_FIXED.
 */
1314 test_compressdev_deflate_stateless_fixed(void)
1316 struct comp_testsuite_params *ts_params = &testsuite_params;
1319 const struct rte_compressdev_capabilities *capab;
/* Query DEFLATE capabilities of device 0; bail out if unavailable */
1321 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1322 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1324 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
/* Private copy of the default compress xform, switched to fixed Huffman */
1327 struct rte_comp_xform *compress_xform =
1328 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1330 if (compress_xform == NULL) {
1332 "Compress xform could not be created\n");
1337 memcpy(compress_xform, ts_params->def_comp_xform,
1338 sizeof(struct rte_comp_xform));
1339 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1341 struct interim_data_params int_data = {
1346 &ts_params->def_decomp_xform,
1350 struct test_data_params test_data = {
1351 RTE_COMP_OP_STATELESS,
/* Exercise every reference buffer, one at a time, in both directions */
1358 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1359 int_data.test_bufs = &compress_test_bufs[i];
1360 int_data.buf_idx = &i;
1362 /* Compress with compressdev, decompress with Zlib */
1363 test_data.zlib_dir = ZLIB_DECOMPRESS;
1364 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1369 /* Compress with Zlib, decompress with compressdev */
1370 test_data.zlib_dir = ZLIB_COMPRESS;
1371 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Free the locally-allocated xform before returning the test status */
1380 rte_free(compress_xform);
/*
 * Stateless DEFLATE test with dynamic Huffman encoding.
 * Same round-trip structure as the fixed-Huffman test: PMD-compress /
 * Zlib-decompress and Zlib-compress / PMD-decompress for each buffer.
 * Skipped when RTE_COMP_FF_HUFFMAN_DYNAMIC is not supported.
 */
1385 test_compressdev_deflate_stateless_dynamic(void)
1387 struct comp_testsuite_params *ts_params = &testsuite_params;
/*
 * NOTE(review): compress_xform is allocated BEFORE the capability check
 * below; if the (elided) early-exit path for an unsupported feature
 * returns without freeing it, this allocation leaks — verify.
 */
1390 struct rte_comp_xform *compress_xform =
1391 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1393 const struct rte_compressdev_capabilities *capab;
1395 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1396 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1398 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1401 if (compress_xform == NULL) {
1403 "Compress xform could not be created\n");
/* Copy the default xform, then request dynamic Huffman coding */
1408 memcpy(compress_xform, ts_params->def_comp_xform,
1409 sizeof(struct rte_comp_xform));
1410 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1412 struct interim_data_params int_data = {
1417 &ts_params->def_decomp_xform,
1421 struct test_data_params test_data = {
1422 RTE_COMP_OP_STATELESS,
/* Round-trip every reference buffer in both directions */
1429 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1430 int_data.test_bufs = &compress_test_bufs[i];
1431 int_data.buf_idx = &i;
1433 /* Compress with compressdev, decompress with Zlib */
1434 test_data.zlib_dir = ZLIB_DECOMPRESS;
1435 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1440 /* Compress with Zlib, decompress with compressdev */
1441 test_data.zlib_dir = ZLIB_COMPRESS;
1442 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1451 rte_free(compress_xform);
/*
 * Stateless DEFLATE test submitting ALL reference buffers as one batch
 * of operations (num_bufs = RTE_DIM(compress_test_bufs)), instead of
 * one buffer per call. Uses the suite's default compress/decompress
 * xforms; round-trips in both PMD/Zlib directions.
 */
1456 test_compressdev_deflate_stateless_multi_op(void)
1458 struct comp_testsuite_params *ts_params = &testsuite_params;
1459 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
/* VLA index array mapping each op back to its source buffer */
1460 uint16_t buf_idx[num_bufs];
1463 for (i = 0; i < num_bufs; i++)
1466 struct interim_data_params int_data = {
1470 &ts_params->def_comp_xform,
1471 &ts_params->def_decomp_xform,
1475 struct test_data_params test_data = {
1476 RTE_COMP_OP_STATELESS,
1483 /* Compress with compressdev, decompress with Zlib */
1484 test_data.zlib_dir = ZLIB_DECOMPRESS;
1485 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1488 /* Compress with Zlib, decompress with compressdev */
1489 test_data.zlib_dir = ZLIB_COMPRESS;
1490 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1493 return TEST_SUCCESS;
/*
 * Stateless DEFLATE test sweeping every compression level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX for each reference buffer.
 * Only the PMD-compress / Zlib-decompress direction is exercised here.
 */
1497 test_compressdev_deflate_stateless_multi_level(void)
1499 struct comp_testsuite_params *ts_params = &testsuite_params;
/* Private, mutable copy of the default compress xform (level is varied) */
1503 struct rte_comp_xform *compress_xform =
1504 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1506 if (compress_xform == NULL) {
1508 "Compress xform could not be created\n");
1513 memcpy(compress_xform, ts_params->def_comp_xform,
1514 sizeof(struct rte_comp_xform));
1516 struct interim_data_params int_data = {
1521 &ts_params->def_decomp_xform,
1525 struct test_data_params test_data = {
1526 RTE_COMP_OP_STATELESS,
1533 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1534 int_data.test_bufs = &compress_test_bufs[i];
1535 int_data.buf_idx = &i;
/* Inner sweep: try every supported compression level on this buffer */
1537 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1539 compress_xform->compress.level = level;
1540 /* Compress with compressdev, decompress with Zlib */
1541 test_data.zlib_dir = ZLIB_DECOMPRESS;
1542 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1552 rte_free(compress_xform);
/* Number of distinct compress/decompress xform pairs created by the test */
1556 #define NUM_XFORMS 3
/*
 * Stateless DEFLATE test using several xforms at once: NUM_XFORMS
 * compress xforms are created, each with a different compression level
 * (starting at RTE_COMP_LEVEL_MIN), paired with NUM_XFORMS decompress
 * xforms. The same input buffer (compress_test_bufs[0]) is used for
 * every xform so results differ only by xform configuration.
 */
1558 test_compressdev_deflate_stateless_multi_xform(void)
1560 struct comp_testsuite_params *ts_params = &testsuite_params;
1561 uint16_t num_bufs = NUM_XFORMS;
1562 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1563 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1564 const char *test_buffers[NUM_XFORMS];
1566 unsigned int level = RTE_COMP_LEVEL_MIN;
1567 uint16_t buf_idx[num_bufs];
1571 /* Create multiple xforms with various levels */
1572 for (i = 0; i < NUM_XFORMS; i++) {
1573 compress_xforms[i] = rte_malloc(NULL,
1574 sizeof(struct rte_comp_xform), 0);
1575 if (compress_xforms[i] == NULL) {
1577 "Compress xform could not be created\n");
1582 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1583 sizeof(struct rte_comp_xform));
1584 compress_xforms[i]->compress.level = level;
/* Matching decompress xform for each compress xform */
1587 decompress_xforms[i] = rte_malloc(NULL,
1588 sizeof(struct rte_comp_xform), 0);
1589 if (decompress_xforms[i] == NULL) {
1591 "Decompress xform could not be created\n");
1596 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1597 sizeof(struct rte_comp_xform));
1600 for (i = 0; i < NUM_XFORMS; i++) {
1602 /* Use the same buffer in all sessions */
1603 test_buffers[i] = compress_test_bufs[0];
1606 struct interim_data_params int_data = {
1615 struct test_data_params test_data = {
1616 RTE_COMP_OP_STATELESS,
1623 /* Compress with compressdev, decompress with Zlib */
1624 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Release every xform allocated above, success or failure */
1631 for (i = 0; i < NUM_XFORMS; i++) {
1632 rte_free(compress_xforms[i]);
1633 rte_free(decompress_xforms[i]);
/*
 * Stateless DEFLATE test over scatter-gather (chained-mbuf) buffers.
 * Baseline runs use SGL input and SGL output; when the device also
 * advertises SGL-in/LB-out or LB-in/SGL-out, those mixed layouts are
 * exercised too. Each layout is tested in both PMD/Zlib directions.
 * Skipped entirely without RTE_COMP_FF_OOP_SGL_IN_SGL_OUT.
 */
1640 test_compressdev_deflate_stateless_sgl(void)
1642 struct comp_testsuite_params *ts_params = &testsuite_params;
1644 const struct rte_compressdev_capabilities *capab;
1646 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1647 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1649 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1652 struct interim_data_params int_data = {
1656 &ts_params->def_comp_xform,
1657 &ts_params->def_decomp_xform,
1661 struct test_data_params test_data = {
1662 RTE_COMP_OP_STATELESS,
1669 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1670 int_data.test_bufs = &compress_test_bufs[i];
1671 int_data.buf_idx = &i;
1673 /* Compress with compressdev, decompress with Zlib */
1674 test_data.zlib_dir = ZLIB_DECOMPRESS;
1675 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1678 /* Compress with Zlib, decompress with compressdev */
1679 test_data.zlib_dir = ZLIB_COMPRESS;
1680 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
/* Optional layout: chained input, linear output */
1683 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1684 /* Compress with compressdev, decompress with Zlib */
1685 test_data.zlib_dir = ZLIB_DECOMPRESS;
1686 test_data.buff_type = SGL_TO_LB;
1687 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1690 /* Compress with Zlib, decompress with compressdev */
1691 test_data.zlib_dir = ZLIB_COMPRESS;
1692 test_data.buff_type = SGL_TO_LB;
1693 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
/* Optional layout: linear input, chained output */
1697 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1698 /* Compress with compressdev, decompress with Zlib */
1699 test_data.zlib_dir = ZLIB_DECOMPRESS;
1700 test_data.buff_type = LB_TO_SGL;
1701 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1704 /* Compress with Zlib, decompress with compressdev */
1705 test_data.zlib_dir = ZLIB_COMPRESS;
1706 test_data.buff_type = LB_TO_SGL;
1707 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1714 return TEST_SUCCESS;
/*
 * Stateless DEFLATE checksum test. For each checksum mode the device
 * supports — CRC32, Adler32, and combined CRC32+Adler32 — the compress
 * and decompress xforms are configured with that mode and every
 * reference buffer is round-tripped. Where Zlib can produce the same
 * checksum (CRC32/Adler32), the PMD checksum is also validated against
 * a Zlib-generated one (ZLIB_COMPRESS direction); the combined mode is
 * validated PMD-against-PMD only (ZLIB_NONE).
 * Skipped when the device supports no checksum at all.
 */
1719 test_compressdev_deflate_stateless_checksum(void)
1721 struct comp_testsuite_params *ts_params = &testsuite_params;
1724 const struct rte_compressdev_capabilities *capab;
1726 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1727 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1729 /* Check if driver supports any checksum */
1730 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1731 (capab->comp_feature_flags &
1732 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1733 (capab->comp_feature_flags &
1734 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
/* Mutable copies of the default xforms; chksum field is varied below */
1737 struct rte_comp_xform *compress_xform =
1738 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1739 if (compress_xform == NULL) {
1740 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1745 memcpy(compress_xform, ts_params->def_comp_xform,
1746 sizeof(struct rte_comp_xform));
1748 struct rte_comp_xform *decompress_xform =
1749 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1750 if (decompress_xform == NULL) {
1751 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
/* Avoid leaking the already-allocated compress xform on this path */
1752 rte_free(compress_xform);
1757 memcpy(decompress_xform, ts_params->def_decomp_xform,
1758 sizeof(struct rte_comp_xform));
1760 struct interim_data_params int_data = {
1769 struct test_data_params test_data = {
1770 RTE_COMP_OP_STATELESS,
1777 /* Check if driver supports crc32 checksum and test */
1778 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1779 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1780 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1782 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1783 /* Compress with compressdev, decompress with Zlib */
1784 int_data.test_bufs = &compress_test_bufs[i];
1785 int_data.buf_idx = &i;
1787 /* Generate zlib checksum and test against selected
1788 * drivers decompression checksum
1790 test_data.zlib_dir = ZLIB_COMPRESS;
1791 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1796 /* Generate compression and decompression
1797 * checksum of selected driver
1799 test_data.zlib_dir = ZLIB_NONE;
1800 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1807 /* Check if driver supports adler32 checksum and test */
1808 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1809 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1810 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1812 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1813 int_data.test_bufs = &compress_test_bufs[i];
1814 int_data.buf_idx = &i;
1816 /* Generate zlib checksum and test against selected
1817 * drivers decompression checksum
1819 test_data.zlib_dir = ZLIB_COMPRESS;
1820 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1824 /* Generate compression and decompression
1825 * checksum of selected driver
1827 test_data.zlib_dir = ZLIB_NONE;
1828 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1835 /* Check if driver supports combined crc and adler checksum and test */
1836 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1837 compress_xform->compress.chksum =
1838 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1839 decompress_xform->decompress.chksum =
1840 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1842 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1843 int_data.test_bufs = &compress_test_bufs[i];
1844 int_data.buf_idx = &i;
1846 /* Generate compression and decompression
1847 * checksum of selected driver
1849 test_data.zlib_dir = ZLIB_NONE;
1850 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1860 rte_free(compress_xform);
1861 rte_free(decompress_xform);
/*
 * Negative test: the destination buffer is deliberately undersized so
 * the PMD must report RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED
 * (verified inside test_deflate_comp_decomp). Runs linear-buffer cases
 * in both PMD/Zlib directions and, if the device supports SGL-in/
 * SGL-out, repeats with chained buffers.
 * Errors logged during this test are expected.
 */
1866 test_compressdev_out_of_space_buffer(void)
1868 struct comp_testsuite_params *ts_params = &testsuite_params;
1871 const struct rte_compressdev_capabilities *capab;
1873 RTE_LOG(INFO, USER1, "This is a negative test errors are expected\n");
1875 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1876 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1878 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
/*
 * NOTE(review): compress_xform is allocated but int_data below uses the
 * suite defaults (def_comp_xform/def_decomp_xform); the local xform
 * only appears in the final rte_free() — confirm it is actually needed.
 */
1881 struct rte_comp_xform *compress_xform =
1882 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1884 if (compress_xform == NULL) {
1886 "Compress xform could not be created\n");
1891 struct interim_data_params int_data = {
1892 &compress_test_bufs[0],
1895 &ts_params->def_comp_xform,
1896 &ts_params->def_decomp_xform,
1900 struct test_data_params test_data = {
1901 RTE_COMP_OP_STATELESS,
1907 /* Compress with compressdev, decompress with Zlib */
1908 test_data.zlib_dir = ZLIB_DECOMPRESS;
1909 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1914 /* Compress with Zlib, decompress with compressdev */
1915 test_data.zlib_dir = ZLIB_COMPRESS;
1916 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Repeat the out-of-space scenario with chained (SGL) mbufs */
1921 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1922 /* Compress with compressdev, decompress with Zlib */
1923 test_data.zlib_dir = ZLIB_DECOMPRESS;
1924 test_data.buff_type = SGL_BOTH;
1925 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1930 /* Compress with Zlib, decompress with compressdev */
1931 test_data.zlib_dir = ZLIB_COMPRESS;
1932 test_data.buff_type = SGL_BOTH;
1933 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1942 rte_free(compress_xform);
/*
 * Stateless DEFLATE test with dynamic Huffman on a large
 * (BIG_DATA_TEST_SIZE) pseudo-random buffer that must be carried in
 * chained mbufs. Requires both RTE_COMP_FF_HUFFMAN_DYNAMIC and
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT; round-trips in both PMD/Zlib
 * directions. The suite's default compress xform is temporarily
 * switched to dynamic Huffman and restored to default on exit.
 */
1947 test_compressdev_deflate_stateless_dynamic_big(void)
1949 struct comp_testsuite_params *ts_params = &testsuite_params;
1951 int ret = TEST_SUCCESS;
1953 const struct rte_compressdev_capabilities *capab;
1954 char *test_buffer = NULL;
1956 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1957 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1959 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1962 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1965 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
1966 if (test_buffer == NULL) {
1968 "Can't allocate buffer for big-data\n");
1972 struct interim_data_params int_data = {
1973 (const char * const *)&test_buffer,
1976 &ts_params->def_comp_xform,
1977 &ts_params->def_decomp_xform,
1981 struct test_data_params test_data = {
1982 RTE_COMP_OP_STATELESS,
/* Temporarily force dynamic Huffman on the shared default xform */
1989 ts_params->def_comp_xform->compress.deflate.huffman =
1990 RTE_COMP_HUFFMAN_DYNAMIC;
1992 /* fill the buffer with data based on rand. data */
/* Fixed seed keeps the "random" content reproducible across runs */
1993 srand(BIG_DATA_TEST_SIZE);
/* rand() % 255 then |1 guarantees a non-zero byte: no embedded NULs
 * before the explicit terminator written below
 */
1994 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
1995 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
1996 test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
1998 /* Compress with compressdev, decompress with Zlib */
1999 test_data.zlib_dir = ZLIB_DECOMPRESS;
2000 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2005 /* Compress with Zlib, decompress with compressdev */
2006 test_data.zlib_dir = ZLIB_COMPRESS;
2007 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform and release the big buffer */
2013 ts_params->def_comp_xform->compress.deflate.huffman =
2014 RTE_COMP_HUFFMAN_DEFAULT;
2015 rte_free(test_buffer);
/*
 * Unit-test suite descriptor: every stateless-deflate test case plus
 * the negative out-of-space test, each wrapped with the generic
 * per-test setup/teardown (the invalid-configuration test runs without
 * setup/teardown).
 */
2020 static struct unit_test_suite compressdev_testsuite = {
2021 .suite_name = "compressdev unit test suite",
2022 .setup = testsuite_setup,
2023 .teardown = testsuite_teardown,
2024 .unit_test_cases = {
2025 TEST_CASE_ST(NULL, NULL,
2026 test_compressdev_invalid_configuration),
2027 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2028 test_compressdev_deflate_stateless_fixed),
2029 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2030 test_compressdev_deflate_stateless_dynamic),
2031 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2032 test_compressdev_deflate_stateless_dynamic_big),
2033 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2034 test_compressdev_deflate_stateless_multi_op),
2035 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2036 test_compressdev_deflate_stateless_multi_level),
2037 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2038 test_compressdev_deflate_stateless_multi_xform),
2039 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2040 test_compressdev_deflate_stateless_sgl),
2041 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2042 test_compressdev_deflate_stateless_checksum),
2043 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2044 test_compressdev_out_of_space_buffer),
2045 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point invoked by the test framework: run the whole suite */
2050 test_compressdev(void)
2052 return unit_test_suite_runner(&compressdev_testsuite);
/* Expose the suite as the "compressdev_autotest" test command */
2055 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);