1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
17 #include "test_compressdev_test_buffer.h"
20 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
28 * 30% extra size for compressed data compared to original data,
29 * in case data size cannot be reduced and it is actually bigger
30 * due to the compress block headers
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
34 #define SMALL_SEG_SIZE 256
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
47 #define OUT_OF_SPACE_BUF 1
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
/* Human-readable names for each rte_comp_huffman value, used in debug logs;
 * indexed directly by the enum (designated initializers). */
55 huffman_type_strings[] = {
56 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
57 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
58 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Source/destination mbuf layout combinations exercised by the tests
 * (LB = linear buffer, SGL = scatter-gather / chained mbufs). */
69 LB_BOTH = 0, /* both input and output are linear */
70 SGL_BOTH, /* both input and output are chained */
71 SGL_TO_LB, /* input buffer is chained */
72 LB_TO_SGL /* output buffer is chained */
/* Global resources shared by the whole suite: the three mbuf pools,
 * the operation pool and the default compress/decompress xforms.
 * Created in testsuite_setup(), released in testsuite_teardown(). */
79 struct comp_testsuite_params {
80 struct rte_mempool *large_mbuf_pool; /* sized to hold the largest test buffer */
81 struct rte_mempool *small_mbuf_pool; /* small segments for SGL (chained mbuf) tests */
82 struct rte_mempool *big_mbuf_pool; /* max-size segments for big-data tests */
83 struct rte_mempool *op_pool; /* pool of rte_comp_op with priv_op_data trailer */
84 struct rte_comp_xform *def_comp_xform; /* default DEFLATE compress xform */
85 struct rte_comp_xform *def_decomp_xform; /* default DEFLATE decompress xform */
/* Input description for one test run: the payload strings and the
 * compress/decompress xforms to cycle through (ops use xform i % num_xforms). */
88 struct interim_data_params {
89 const char * const *test_bufs; /* NUL-terminated strings used as payloads */
90 unsigned int num_bufs; /* number of entries in test_bufs */
92 struct rte_comp_xform **compress_xforms;
93 struct rte_comp_xform **decompress_xforms;
94 unsigned int num_xforms; /* entries in each xform array */
/* Run-shape configuration for test_deflate_comp_decomp(). */
97 struct test_data_params {
98 enum rte_comp_op_type state; /* stateless vs stateful (only stateless supported) */
99 enum varied_buff buff_type; /* linear/chained combination for src/dst */
100 enum zlib_direction zlib_dir; /* which direction(s) are done by zlib directly */
101 unsigned int out_of_space; /* non-zero: make destination deliberately too small */
102 unsigned int big_data; /* non-zero: use the big mbuf pool / multi-segment data */
/* Single zero-initialized global instance; populated by testsuite_setup(). */
105 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Releases every pool and xform created in testsuite_setup().
 * First logs an error for any pool that still holds outstanding
 * objects — that would indicate an mbuf/op leak in one of the tests.
 */
108 testsuite_teardown(void)
110 struct comp_testsuite_params *ts_params = &testsuite_params;
112 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
113 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
114 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
115 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
116 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
117 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
118 if (rte_mempool_in_use_count(ts_params->op_pool))
119 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() tolerate NULL, so this is safe even if
 * setup failed partway through. */
121 rte_mempool_free(ts_params->large_mbuf_pool);
122 rte_mempool_free(ts_params->small_mbuf_pool);
123 rte_mempool_free(ts_params->big_mbuf_pool);
124 rte_mempool_free(ts_params->op_pool);
125 rte_free(ts_params->def_comp_xform);
126 rte_free(ts_params->def_decomp_xform);
/*
 * Suite-level setup: requires at least one compressdev, then creates
 * the three mbuf pools sized from the largest test string, the
 * operation pool, and the default compress/decompress DEFLATE xforms.
 * On any allocation failure it falls through to testsuite_teardown()
 * (error returns elided in this view).
 */
130 testsuite_setup(void)
132 struct comp_testsuite_params *ts_params = &testsuite_params;
133 uint32_t max_buf_size = 0;
136 if (rte_compressdev_count() == 0) {
137 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
141 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
142 rte_compressdev_name_get(0));
/* Size pools from the longest test string (incl. its NUL terminator). */
144 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
145 max_buf_size = RTE_MAX(max_buf_size,
146 strlen(compress_test_bufs[i]) + 1);
149 * Buffers to be used in compression and decompression.
150 * Since decompressed data might be larger than
151 * compressed data (due to block header),
152 * buffers should be big enough for both cases.
154 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
155 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
158 max_buf_size + RTE_PKTMBUF_HEADROOM,
160 if (ts_params->large_mbuf_pool == NULL) {
161 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
165 /* Create mempool with smaller buffers for SGL testing */
166 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
167 NUM_LARGE_MBUFS * MAX_SEGS,
169 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
171 if (ts_params->small_mbuf_pool == NULL) {
172 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
176 /* Create mempool with big buffers for SGL testing */
177 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
180 MAX_MBUF_SEGMENT_SIZE,
182 if (ts_params->big_mbuf_pool == NULL) {
183 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* Op pool carries a priv_op_data trailer after each op (used to
 * remember the original buffer index across out-of-order dequeues). */
187 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
188 0, sizeof(struct priv_op_data),
190 if (ts_params->op_pool == NULL) {
191 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
195 ts_params->def_comp_xform =
196 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
197 if (ts_params->def_comp_xform == NULL) {
199 "Default compress xform could not be created\n");
202 ts_params->def_decomp_xform =
203 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
204 if (ts_params->def_decomp_xform == NULL) {
206 "Default decompress xform could not be created\n");
210 /* Initializes default values for compress/decompress xforms */
211 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): the next line ends with ',' (comma operator), not ';'.
 * It still compiles and behaves as two statements, but ';' was almost
 * certainly intended — same at the decompress.algo assignment below. */
212 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
213 ts_params->def_comp_xform->compress.deflate.huffman =
214 RTE_COMP_HUFFMAN_DEFAULT;
215 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
216 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
217 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
219 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
220 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
221 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
222 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: release anything already allocated. */
227 testsuite_teardown();
/*
 * Per-test setup: configures compressdev 0 with a single queue pair
 * (NUM_MAX_XFORMS private xforms, NUM_MAX_INFLIGHT_OPS descriptors)
 * and starts the device. Failure paths (returns elided in this view)
 * fail the test.
 */
233 generic_ut_setup(void)
235 /* Configure compressdev (one device, one queue pair) */
236 struct rte_compressdev_config config = {
237 .socket_id = rte_socket_id(),
239 .max_nb_priv_xforms = NUM_MAX_XFORMS,
243 if (rte_compressdev_configure(0, &config) < 0) {
244 RTE_LOG(ERR, USER1, "Device configuration failed\n");
248 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
249 rte_socket_id()) < 0) {
250 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
254 if (rte_compressdev_start(0) < 0) {
255 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stops and closes compressdev 0 (close failure is
 * only logged, not propagated). */
263 generic_ut_teardown(void)
265 rte_compressdev_stop(0);
266 if (rte_compressdev_close(0) < 0)
267 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: verifies the driver rejects invalid configurations —
 * zero queue pairs, more queue pairs than the device maximum (when the
 * device advertises one), and queue-pair setup before any queue pairs
 * were configured.
 */
271 test_compressdev_invalid_configuration(void)
273 struct rte_compressdev_config invalid_config;
274 struct rte_compressdev_config valid_config = {
275 .socket_id = rte_socket_id(),
277 .max_nb_priv_xforms = NUM_MAX_XFORMS,
280 struct rte_compressdev_info dev_info;
282 /* Invalid configuration with 0 queue pairs */
283 memcpy(&invalid_config, &valid_config,
284 sizeof(struct rte_compressdev_config));
285 invalid_config.nb_queue_pairs = 0;
287 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
288 "Device configuration was successful "
289 "with no queue pairs (invalid)\n");
292 * Invalid configuration with too many queue pairs
293 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit", so skip that case then. */
295 rte_compressdev_info_get(0, &dev_info);
296 if (dev_info.max_nb_queue_pairs != 0) {
297 memcpy(&invalid_config, &valid_config,
298 sizeof(struct rte_compressdev_config));
299 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
301 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
302 "Device configuration was successful "
303 "with too many queue pairs (invalid)\n");
306 /* Invalid queue pair setup, with no number of queue pairs set */
307 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
308 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
309 "Queue pair setup was successful "
310 "with no queue pairs set (invalid)\n");
/*
 * Byte-wise comparison of two buffers: logs and returns an error
 * (return values elided in this view) when the lengths differ or
 * memcmp() finds a mismatch; success otherwise.
 */
316 compare_buffers(const char *buffer1, uint32_t buffer1_len,
317 const char *buffer2, uint32_t buffer2_len)
319 if (buffer1_len != buffer2_len) {
320 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
324 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
325 RTE_LOG(ERR, USER1, "Buffers are different\n");
333 * Maps compressdev and Zlib flush flags
/* Each case presumably returns the matching zlib Z_*_FLUSH constant
 * (return statements elided in this view) — TODO confirm the exact
 * Z_* values against the full source. */
336 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
339 case RTE_COMP_FLUSH_NONE:
341 case RTE_COMP_FLUSH_SYNC:
343 case RTE_COMP_FLUSH_FULL:
345 case RTE_COMP_FLUSH_FINAL:
348 * There should be only the values above,
349 * so this should never happen
/*
 * Reference compression path: performs the operation described by @op
 * with zlib directly (no PMD), so PMD output can be validated against it.
 * Handles linear and chained (SGL) src/dst mbufs by flattening through
 * temporary contiguous buffers.  The xform's checksum selects the stream
 * format: ADLER32 -> zlib-wrapped, CRC32 -> gzip-wrapped, otherwise raw
 * DEFLATE (negative windowBits).  On success fills op->consumed,
 * op->produced, op->status and op->output_chksum.
 */
357 compress_zlib(struct rte_comp_op *op,
358 const struct rte_comp_xform *xform, int mem_level)
362 int strategy, window_bits, comp_level;
363 int ret = TEST_FAILED;
364 uint8_t *single_src_buf = NULL;
365 uint8_t *single_dst_buf = NULL;
367 /* initialize zlib stream */
368 stream.zalloc = Z_NULL;
369 stream.zfree = Z_NULL;
370 stream.opaque = Z_NULL;
/* FIXED Huffman presumably selects Z_FIXED (elided branch) — verify. */
372 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
375 strategy = Z_DEFAULT_STRATEGY;
378 * Window bits is the base two logarithm of the window size (in bytes).
379 * When doing raw DEFLATE, this number will be negative.
381 window_bits = -(xform->compress.window_size);
/* Checksum request switches the framing: zlib header (ADLER32) or
 * gzip header (CRC32, via windowBits 31) instead of raw DEFLATE. */
382 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
384 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
385 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
387 comp_level = xform->compress.level;
389 if (comp_level != RTE_COMP_LEVEL_NONE)
390 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
391 window_bits, mem_level, strategy);
393 ret = deflateInit(&stream, Z_NO_COMPRESSION);
396 printf("Zlib deflate could not be initialized\n");
400 /* Assuming stateless operation */
/* Chained source: flatten it into a contiguous scratch buffer first. */
402 if (op->m_src->nb_segs > 1) {
403 single_src_buf = rte_malloc(NULL,
404 rte_pktmbuf_pkt_len(op->m_src), 0);
405 if (single_src_buf == NULL) {
406 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
410 if (rte_pktmbuf_read(op->m_src, op->src.offset,
411 rte_pktmbuf_pkt_len(op->m_src) -
413 single_src_buf) == NULL) {
415 "Buffer could not be read entirely\n");
419 stream.avail_in = op->src.length;
420 stream.next_in = single_src_buf;
423 stream.avail_in = op->src.length;
424 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: compress into a scratch buffer, scatter later. */
428 if (op->m_dst->nb_segs > 1) {
430 single_dst_buf = rte_malloc(NULL,
431 rte_pktmbuf_pkt_len(op->m_dst), 0);
432 if (single_dst_buf == NULL) {
434 "Buffer could not be allocated\n");
438 stream.avail_out = op->m_dst->pkt_len;
439 stream.next_out = single_dst_buf;
441 } else {/* linear output */
442 stream.avail_out = op->m_dst->data_len;
443 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
447 /* Stateless operation, all buffer will be compressed in one go */
448 zlib_flush = map_zlib_flush_flag(op->flush_flag);
449 ret = deflate(&stream, zlib_flush);
451 if (stream.avail_in != 0) {
452 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* A single-shot FINISH must end the stream; anything else is failure. */
456 if (ret != Z_STREAM_END)
459 /* Copy data to destination SGL */
460 if (op->m_dst->nb_segs > 1) {
461 uint32_t remaining_data = stream.total_out;
462 uint8_t *src_data = single_dst_buf;
463 struct rte_mbuf *dst_buf = op->m_dst;
465 while (remaining_data > 0) {
466 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
467 uint8_t *, op->dst.offset);
469 if (remaining_data < dst_buf->data_len) {
470 memcpy(dst_data, src_data, remaining_data);
473 memcpy(dst_data, src_data, dst_buf->data_len);
474 remaining_data -= dst_buf->data_len;
475 src_data += dst_buf->data_len;
476 dst_buf = dst_buf->next;
481 op->consumed = stream.total_in;
/* Strip the zlib/gzip framing so op->produced and the mbuf contain
 * only the raw DEFLATE payload, matching what a PMD would produce. */
482 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
483 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
484 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
485 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
487 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
488 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
489 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
490 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
493 op->produced = stream.total_out;
495 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* stream.adler holds adler32 or crc32 depending on the stream format. */
496 op->output_chksum = stream.adler;
498 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so unconditional cleanup is safe. */
503 rte_free(single_src_buf);
504 rte_free(single_dst_buf);
/*
 * Reference decompression path: inflates the operation described by @op
 * with zlib directly (no PMD).  Uses raw DEFLATE (negative windowBits
 * from the xform's window size) and flattens chained mbufs through
 * temporary contiguous buffers.  On success fills op->consumed,
 * op->produced and op->status.
 */
510 decompress_zlib(struct rte_comp_op *op,
511 const struct rte_comp_xform *xform)
516 int ret = TEST_FAILED;
517 uint8_t *single_src_buf = NULL;
518 uint8_t *single_dst_buf = NULL;
520 /* initialize zlib stream */
521 stream.zalloc = Z_NULL;
522 stream.zfree = Z_NULL;
523 stream.opaque = Z_NULL;
526 * Window bits is the base two logarithm of the window size (in bytes).
527 * When doing raw DEFLATE, this number will be negative.
529 window_bits = -(xform->decompress.window_size);
530 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate path —
 * the string looks copy-pasted from compress_zlib(). */
533 printf("Zlib deflate could not be initialized\n");
537 /* Assuming stateless operation */
/* Chained source: flatten src (and stage dst) in contiguous scratch
 * buffers.  NOTE(review): both allocations are gated on m_src being
 * segmented; presumably the dst staging should depend on m_dst —
 * verify against the full source. */
539 if (op->m_src->nb_segs > 1) {
540 single_src_buf = rte_malloc(NULL,
541 rte_pktmbuf_pkt_len(op->m_src), 0);
542 if (single_src_buf == NULL) {
543 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
546 single_dst_buf = rte_malloc(NULL,
547 rte_pktmbuf_pkt_len(op->m_dst), 0);
548 if (single_dst_buf == NULL) {
549 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
552 if (rte_pktmbuf_read(op->m_src, 0,
553 rte_pktmbuf_pkt_len(op->m_src),
554 single_src_buf) == NULL) {
556 "Buffer could not be read entirely\n");
560 stream.avail_in = op->src.length;
561 stream.next_in = single_src_buf;
562 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
563 stream.next_out = single_dst_buf;
566 stream.avail_in = op->src.length;
567 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
568 stream.avail_out = op->m_dst->data_len;
569 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
572 /* Stateless operation, all buffer will be compressed in one go */
573 zlib_flush = map_zlib_flush_flag(op->flush_flag);
574 ret = inflate(&stream, zlib_flush);
576 if (stream.avail_in != 0) {
577 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* Single-shot inflate must consume the whole stream. */
581 if (ret != Z_STREAM_END)
/* Scatter the staged output back into the destination mbuf chain. */
584 if (op->m_src->nb_segs > 1) {
585 uint32_t remaining_data = stream.total_out;
586 uint8_t *src_data = single_dst_buf;
587 struct rte_mbuf *dst_buf = op->m_dst;
589 while (remaining_data > 0) {
590 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
593 if (remaining_data < dst_buf->data_len) {
594 memcpy(dst_data, src_data, remaining_data);
597 memcpy(dst_data, src_data, dst_buf->data_len);
598 remaining_data -= dst_buf->data_len;
599 src_data += dst_buf->data_len;
600 dst_buf = dst_buf->next;
605 op->consumed = stream.total_in;
606 op->produced = stream.total_out;
607 op->status = RTE_COMP_OP_STATUS_SUCCESS;
609 inflateReset(&stream);
/*
 * Builds a scatter-gather chain on @head_buf holding @total_data_size
 * bytes, split into segments of @seg_size, and (when @test_buf is not
 * NULL) copies the test data into the chain as it goes.
 *
 * @test_buf           source data to copy, or NULL to only reserve space
 * @head_buf           already-allocated first mbuf of the chain
 * @total_data_size    total bytes the chain must hold
 * @small_mbuf_pool    pool for seg_size-sized middle segments
 * @large_mbuf_pool    pool for an oversized final segment (see below)
 * @limit_segs_in_sgl  non-zero caps the segment count; the remainder is
 *                     then packed into one large final segment
 * (the seg_size parameter line is elided in this view)
 */
619 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
620 uint32_t total_data_size,
621 struct rte_mempool *small_mbuf_pool,
622 struct rte_mempool *large_mbuf_pool,
623 uint8_t limit_segs_in_sgl,
626 uint32_t remaining_data = total_data_size;
627 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
628 struct rte_mempool *pool;
629 struct rte_mbuf *next_seg;
632 const char *data_ptr = test_buf;
/* Cap the chain length; "- 1" accounts for the head segment below. */
636 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
637 num_remaining_segs = limit_segs_in_sgl - 1;
640 * Allocate data in the first segment (header) and
641 * copy data if test buffer is provided
643 if (remaining_data < seg_size)
644 data_size = remaining_data;
646 data_size = seg_size;
647 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
648 if (buf_ptr == NULL) {
650 "Not enough space in the 1st buffer\n");
654 if (data_ptr != NULL) {
655 /* Copy characters without NULL terminator */
656 strncpy(buf_ptr, data_ptr, data_size);
657 data_ptr += data_size;
659 remaining_data -= data_size;
660 num_remaining_segs--;
663 * Allocate the rest of the segments,
664 * copy the rest of the data and chain the segments.
666 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment takes all remaining data; if the cap squeezed more than
 * seg_size into it, draw it from the large pool instead. */
668 if (i == (num_remaining_segs - 1)) {
670 if (remaining_data > seg_size)
671 pool = large_mbuf_pool;
673 pool = small_mbuf_pool;
674 data_size = remaining_data;
676 data_size = seg_size;
677 pool = small_mbuf_pool;
680 next_seg = rte_pktmbuf_alloc(pool);
681 if (next_seg == NULL) {
683 "New segment could not be allocated "
684 "from the mempool\n");
687 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
688 if (buf_ptr == NULL) {
690 "Not enough space in the buffer\n");
691 rte_pktmbuf_free(next_seg);
694 if (data_ptr != NULL) {
695 /* Copy characters without NULL terminator */
696 strncpy(buf_ptr, data_ptr, data_size);
697 data_ptr += data_size;
699 remaining_data -= data_size;
701 ret = rte_pktmbuf_chain(head_buf, next_seg);
703 rte_pktmbuf_free(next_seg);
/* NOTE(review): runtime string reads "could not chained" — missing
 * "be"; left untouched here since it is program output. */
705 "Segment could not chained\n");
714 * Compresses and decompresses buffer with compressdev API and Zlib API
717 test_deflate_comp_decomp(const struct interim_data_params *int_data,
718 const struct test_data_params *test_data)
/*
 * Round-trip engine for all DEFLATE tests: compresses every test
 * buffer and decompresses the result, doing each direction either on
 * the compressdev PMD or directly with zlib (per test_data->zlib_dir),
 * then compares the round-tripped data (and checksums) against the
 * originals.  Also drives the out-of-space and SGL/linear buffer
 * variations.  Error paths (elided gotos/returns) funnel into the
 * resource-freeing tail at the end.
 */
720 struct comp_testsuite_params *ts_params = &testsuite_params;
721 const char * const *test_bufs = int_data->test_bufs;
722 unsigned int num_bufs = int_data->num_bufs;
723 uint16_t *buf_idx = int_data->buf_idx;
724 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
725 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
726 unsigned int num_xforms = int_data->num_xforms;
727 enum rte_comp_op_type state = test_data->state;
728 unsigned int buff_type = test_data->buff_type;
729 unsigned int out_of_space = test_data->out_of_space;
730 unsigned int big_data = test_data->big_data;
731 enum zlib_direction zlib_dir = test_data->zlib_dir;
/* VLAs sized by num_bufs — callers keep num_bufs small, but note the
 * stack usage grows with the input (no bound enforced here). */
734 struct rte_mbuf *uncomp_bufs[num_bufs];
735 struct rte_mbuf *comp_bufs[num_bufs];
736 struct rte_comp_op *ops[num_bufs];
737 struct rte_comp_op *ops_processed[num_bufs];
738 void *priv_xforms[num_bufs];
739 uint16_t num_enqd, num_deqd, num_total_deqd;
740 uint16_t num_priv_xforms = 0;
741 unsigned int deqd_retries = 0;
742 struct priv_op_data *priv_data;
745 struct rte_mempool *buf_pool;
747 /* Compressing with CompressDev */
748 unsigned int oos_zlib_decompress =
749 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
750 /* Decompressing with CompressDev */
751 unsigned int oos_zlib_compress =
752 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
753 const struct rte_compressdev_capabilities *capa =
754 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
755 char *contig_buf = NULL;
756 uint64_t compress_checksum[num_bufs];
760 "Compress device does not support DEFLATE\n");
764 /* Initialize all arrays to NULL */
765 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
766 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
767 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
768 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
769 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* Pick the pool matching the requested buffer shape. */
772 buf_pool = ts_params->big_mbuf_pool;
773 else if (buff_type == SGL_BOTH)
774 buf_pool = ts_params->small_mbuf_pool;
776 buf_pool = ts_params->large_mbuf_pool;
778 /* Prepare the source mbufs with the data */
779 ret = rte_pktmbuf_alloc_bulk(buf_pool,
780 uncomp_bufs, num_bufs);
783 "Source mbufs could not be allocated "
784 "from the mempool\n");
788 if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
789 for (i = 0; i < num_bufs; i++) {
790 data_size = strlen(test_bufs[i]) + 1;
791 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
793 big_data ? buf_pool : ts_params->small_mbuf_pool,
794 big_data ? buf_pool : ts_params->large_mbuf_pool,
795 big_data ? 0 : MAX_SEGS,
796 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
800 for (i = 0; i < num_bufs; i++) {
801 data_size = strlen(test_bufs[i]) + 1;
802 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
803 strlcpy(buf_ptr, test_bufs[i], data_size);
807 /* Prepare the destination mbufs */
808 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
811 "Destination mbufs could not be allocated "
812 "from the mempool\n");
816 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
817 for (i = 0; i < num_bufs; i++) {
/* Out-of-space case: 1-byte destination forces the PMD to fail with
 * OUT_OF_SPACE_TERMINATED, which the status checks below expect. */
818 if (out_of_space == 1 && oos_zlib_decompress)
819 data_size = OUT_OF_SPACE_BUF;
821 (data_size = strlen(test_bufs[i]) *
822 COMPRESS_BUF_SIZE_RATIO);
824 if (prepare_sgl_bufs(NULL, comp_bufs[i],
826 big_data ? buf_pool : ts_params->small_mbuf_pool,
827 big_data ? buf_pool : ts_params->large_mbuf_pool,
828 big_data ? 0 : MAX_SEGS,
829 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
835 for (i = 0; i < num_bufs; i++) {
836 if (out_of_space == 1 && oos_zlib_decompress)
837 data_size = OUT_OF_SPACE_BUF;
839 (data_size = strlen(test_bufs[i]) *
840 COMPRESS_BUF_SIZE_RATIO);
842 rte_pktmbuf_append(comp_bufs[i], data_size);
846 /* Build the compression operations */
847 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
850 "Compress operations could not be allocated "
851 "from the mempool\n");
856 for (i = 0; i < num_bufs; i++) {
857 ops[i]->m_src = uncomp_bufs[i];
858 ops[i]->m_dst = comp_bufs[i];
859 ops[i]->src.offset = 0;
860 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
861 ops[i]->dst.offset = 0;
862 if (state == RTE_COMP_OP_STATELESS) {
863 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
866 "Stateful operations are not supported "
867 "in these tests yet\n");
870 ops[i]->input_chksum = 0;
872 * Store original operation index in private data,
873 * since ordering does not have to be maintained,
874 * when dequeueing from compressdev, so a comparison
875 * at the end of the test can be done.
877 priv_data = (struct priv_op_data *) (ops[i] + 1);
878 priv_data->orig_idx = i;
881 /* Compress data (either with Zlib API or compressdev API */
882 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
883 for (i = 0; i < num_bufs; i++) {
884 const struct rte_comp_xform *compress_xform =
885 compress_xforms[i % num_xforms];
886 ret = compress_zlib(ops[i], compress_xform,
/* zlib path is synchronous, so processed == submitted, in order. */
891 ops_processed[i] = ops[i];
894 /* Create compress private xform data */
895 for (i = 0; i < num_xforms; i++) {
896 ret = rte_compressdev_private_xform_create(0,
897 (const struct rte_comp_xform *)compress_xforms[i],
901 "Compression private xform "
902 "could not be created\n");
908 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
909 /* Attach shareable private xform data to ops */
910 for (i = 0; i < num_bufs; i++)
911 ops[i]->private_xform = priv_xforms[i % num_xforms];
913 /* Create rest of the private xforms for the other ops */
914 for (i = num_xforms; i < num_bufs; i++) {
915 ret = rte_compressdev_private_xform_create(0,
916 compress_xforms[i % num_xforms],
920 "Compression private xform "
921 "could not be created\n");
927 /* Attach non shareable private xform data to ops */
928 for (i = 0; i < num_bufs; i++)
929 ops[i]->private_xform = priv_xforms[i];
932 /* Enqueue and dequeue all operations */
933 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
934 if (num_enqd < num_bufs) {
936 "The operations could not be enqueued\n");
943 * If retrying a dequeue call, wait for 10 ms to allow
944 * enough time to the driver to process the operations
946 if (deqd_retries != 0) {
948 * Avoid infinite loop if not all the
949 * operations get out of the device
951 if (deqd_retries == MAX_DEQD_RETRIES) {
953 "Not all operations could be "
957 usleep(DEQUEUE_WAIT_TIME);
959 num_deqd = rte_compressdev_dequeue_burst(0, 0,
960 &ops_processed[num_total_deqd], num_bufs);
961 num_total_deqd += num_deqd;
964 } while (num_total_deqd < num_enqd);
968 /* Free compress private xforms */
969 for (i = 0; i < num_priv_xforms; i++) {
970 rte_compressdev_private_xform_free(0, priv_xforms[i]);
971 priv_xforms[i] = NULL;
/* Log per-buffer compression stats and stash checksums for the
 * decompression-stage comparison. */
976 for (i = 0; i < num_bufs; i++) {
977 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
978 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
979 const struct rte_comp_compress_xform *compress_xform =
980 &compress_xforms[xform_idx]->compress;
981 enum rte_comp_huffman huffman_type =
982 compress_xform->deflate.huffman;
983 char engine[] = "zlib (directly, not PMD)";
984 if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
985 strlcpy(engine, "PMD", sizeof(engine));
987 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
988 " %u bytes (level = %d, huffman = %s)\n",
989 buf_idx[priv_data->orig_idx], engine,
990 ops_processed[i]->consumed, ops_processed[i]->produced,
991 compress_xform->level,
992 huffman_type_strings[huffman_type]);
993 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
994 ops_processed[i]->consumed == 0 ? 0 :
995 (float)ops_processed[i]->produced /
996 ops_processed[i]->consumed * 100);
997 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
/* NOTE(review): indexed by dequeue position i, not orig_idx, while
 * dequeues may be out of order; the later comparison also uses i, so
 * this is consistent only if the same reordering holds — verify. */
998 compress_checksum[i] = ops_processed[i]->output_chksum;
1003 * Check operation status and free source mbufs (destination mbuf and
1004 * compress operation information is needed for the decompression stage)
1006 for (i = 0; i < num_bufs; i++) {
1007 if (out_of_space && oos_zlib_decompress) {
1008 if (ops_processed[i]->status !=
1009 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1013 "Operation without expected out of "
1014 "space status error\n");
1020 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1022 "Some operations were not successful\n");
1025 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1026 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1027 uncomp_bufs[priv_data->orig_idx] = NULL;
/* Out-of-space compression test ends here (no decompression stage). */
1030 if (out_of_space && oos_zlib_decompress) {
1035 /* Allocate buffers for decompressed data */
1036 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1039 "Destination mbufs could not be allocated "
1040 "from the mempool\n");
1044 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1045 for (i = 0; i < num_bufs; i++) {
1046 priv_data = (struct priv_op_data *)
1047 (ops_processed[i] + 1);
1048 if (out_of_space == 1 && oos_zlib_compress)
1049 data_size = OUT_OF_SPACE_BUF;
1052 strlen(test_bufs[priv_data->orig_idx]) + 1;
1054 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1056 big_data ? buf_pool : ts_params->small_mbuf_pool,
1057 big_data ? buf_pool : ts_params->large_mbuf_pool,
1058 big_data ? 0 : MAX_SEGS,
1059 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1065 for (i = 0; i < num_bufs; i++) {
1066 priv_data = (struct priv_op_data *)
1067 (ops_processed[i] + 1);
1068 if (out_of_space == 1 && oos_zlib_compress)
1069 data_size = OUT_OF_SPACE_BUF;
1072 strlen(test_bufs[priv_data->orig_idx]) + 1;
1074 rte_pktmbuf_append(uncomp_bufs[i], data_size);
1078 /* Build the decompression operations */
1079 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1082 "Decompress operations could not be allocated "
1083 "from the mempool\n");
1087 /* Source buffer is the compressed data from the previous operations */
1088 for (i = 0; i < num_bufs; i++) {
1089 ops[i]->m_src = ops_processed[i]->m_dst;
1090 ops[i]->m_dst = uncomp_bufs[i];
1091 ops[i]->src.offset = 0;
1093 * Set the length of the compressed data to the
1094 * number of bytes that were produced in the previous stage
1096 ops[i]->src.length = ops_processed[i]->produced;
1097 ops[i]->dst.offset = 0;
1098 if (state == RTE_COMP_OP_STATELESS) {
1099 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1102 "Stateful operations are not supported "
1103 "in these tests yet\n");
1106 ops[i]->input_chksum = 0;
1108 * Copy private data from previous operations,
1109 * to keep the pointer to the original buffer
1111 memcpy(ops[i] + 1, ops_processed[i] + 1,
1112 sizeof(struct priv_op_data));
1116 * Free the previous compress operations,
1117 * as they are not needed anymore
1119 rte_comp_op_bulk_free(ops_processed, num_bufs);
1121 /* Decompress data (either with Zlib API or compressdev API */
1122 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1123 for (i = 0; i < num_bufs; i++) {
1124 priv_data = (struct priv_op_data *)(ops[i] + 1);
1125 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1126 const struct rte_comp_xform *decompress_xform =
1127 decompress_xforms[xform_idx];
1129 ret = decompress_zlib(ops[i], decompress_xform);
1133 ops_processed[i] = ops[i];
1136 /* Create decompress private xform data */
1137 for (i = 0; i < num_xforms; i++) {
1138 ret = rte_compressdev_private_xform_create(0,
1139 (const struct rte_comp_xform *)decompress_xforms[i],
1143 "Decompression private xform "
1144 "could not be created\n");
1150 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1151 /* Attach shareable private xform data to ops */
1152 for (i = 0; i < num_bufs; i++) {
1153 priv_data = (struct priv_op_data *)(ops[i] + 1);
1154 uint16_t xform_idx = priv_data->orig_idx %
1156 ops[i]->private_xform = priv_xforms[xform_idx];
1159 /* Create rest of the private xforms for the other ops */
1160 for (i = num_xforms; i < num_bufs; i++) {
1161 ret = rte_compressdev_private_xform_create(0,
1162 decompress_xforms[i % num_xforms],
1166 "Decompression private xform "
1167 "could not be created\n");
1173 /* Attach non shareable private xform data to ops */
1174 for (i = 0; i < num_bufs; i++) {
1175 priv_data = (struct priv_op_data *)(ops[i] + 1);
1176 uint16_t xform_idx = priv_data->orig_idx;
1177 ops[i]->private_xform = priv_xforms[xform_idx];
1181 /* Enqueue and dequeue all operations */
1182 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1183 if (num_enqd < num_bufs) {
1185 "The operations could not be enqueued\n");
1192 * If retrying a dequeue call, wait for 10 ms to allow
1193 * enough time to the driver to process the operations
1195 if (deqd_retries != 0) {
1197 * Avoid infinite loop if not all the
1198 * operations get out of the device
1200 if (deqd_retries == MAX_DEQD_RETRIES) {
1202 "Not all operations could be "
1206 usleep(DEQUEUE_WAIT_TIME);
1208 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1209 &ops_processed[num_total_deqd], num_bufs);
1210 num_total_deqd += num_deqd;
1212 } while (num_total_deqd < num_enqd);
1217 for (i = 0; i < num_bufs; i++) {
1218 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1219 char engine[] = "zlib, (directly, no PMD)";
1220 if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1221 strlcpy(engine, "pmd", sizeof(engine));
1222 RTE_LOG(DEBUG, USER1,
1223 "Buffer %u decompressed by %s from %u to %u bytes\n",
1224 buf_idx[priv_data->orig_idx], engine,
1225 ops_processed[i]->consumed, ops_processed[i]->produced);
1230 * Check operation status and free source mbuf (destination mbuf and
1231 * compress operation information is still needed)
1233 for (i = 0; i < num_bufs; i++) {
1234 if (out_of_space && oos_zlib_compress) {
1235 if (ops_processed[i]->status !=
1236 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1240 "Operation without expected out of "
1241 "space status error\n");
1247 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1249 "Some operations were not successful\n");
1252 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1253 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1254 comp_bufs[priv_data->orig_idx] = NULL;
/* Out-of-space decompression test ends here. */
1257 if (out_of_space && oos_zlib_compress) {
1263 * Compare the original stream with the decompressed stream
1264 * (in size and the data)
1266 for (i = 0; i < num_bufs; i++) {
1267 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1268 const char *buf1 = test_bufs[priv_data->orig_idx];
/* Flatten possibly-chained output into a contiguous buffer to compare. */
1270 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1271 if (contig_buf == NULL) {
1272 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1277 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1278 ops_processed[i]->produced, contig_buf);
1279 if (compare_buffers(buf1, strlen(buf1) + 1,
1280 buf2, ops_processed[i]->produced) < 0)
1283 /* Test checksums */
1284 if (compress_xforms[0]->compress.chksum !=
1285 RTE_COMP_CHECKSUM_NONE) {
1286 if (ops_processed[i]->output_chksum !=
1287 compress_checksum[i]) {
1288 RTE_LOG(ERR, USER1, "The checksums differ\n"
1289 "Compression Checksum: %" PRIu64 "\tDecompression "
1290 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1291 ops_processed[i]->output_chksum);
1296 rte_free(contig_buf);
1303 /* Free resources */
/* Common exit path: free whatever is still allocated; entries already
 * consumed were NULLed out above, and the free functions accept NULL. */
1304 for (i = 0; i < num_bufs; i++) {
1305 rte_pktmbuf_free(uncomp_bufs[i]);
1306 rte_pktmbuf_free(comp_bufs[i]);
1307 rte_comp_op_free(ops[i]);
1308 rte_comp_op_free(ops_processed[i]);
1310 for (i = 0; i < num_priv_xforms; i++) {
1311 if (priv_xforms[i] != NULL)
1312 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1314 rte_free(contig_buf);
/*
 * Stateless DEFLATE test with fixed Huffman coding.
 * Skips when the device does not advertise RTE_COMP_FF_HUFFMAN_FIXED.
 * Each test buffer is run in both directions: PMD-compress / Zlib-decompress,
 * then Zlib-compress / PMD-decompress.
 * NOTE(review): this listing elides intermediate lines; the early-return and
 * error-exit paths between the visible statements are not shown here.
 */
1320 test_compressdev_deflate_stateless_fixed(void)
1322 struct comp_testsuite_params *ts_params = &testsuite_params;
1325 const struct rte_compressdev_capabilities *capab;
1327 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1328 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* Fixed-Huffman support is optional; bail out (elided) if absent */
1330 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1333 struct rte_comp_xform *compress_xform =
1334 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1336 if (compress_xform == NULL) {
1338 "Compress xform could not be created\n");
/* Clone the default compress xform, then force fixed Huffman codes */
1343 memcpy(compress_xform, ts_params->def_comp_xform,
1344 sizeof(struct rte_comp_xform));
1345 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1347 struct interim_data_params int_data = {
1352 &ts_params->def_decomp_xform,
1356 struct test_data_params test_data = {
1357 RTE_COMP_OP_STATELESS,
/* Exercise every reference buffer, one at a time */
1364 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1365 int_data.test_bufs = &compress_test_bufs[i];
1366 int_data.buf_idx = &i;
1368 /* Compress with compressdev, decompress with Zlib */
1369 test_data.zlib_dir = ZLIB_DECOMPRESS;
1370 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1375 /* Compress with Zlib, decompress with compressdev */
1376 test_data.zlib_dir = ZLIB_COMPRESS;
1377 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1386 rte_free(compress_xform);
/*
 * Stateless DEFLATE test with dynamic Huffman coding.
 * Mirrors the fixed-Huffman test but requires RTE_COMP_FF_HUFFMAN_DYNAMIC.
 * NOTE(review): compress_xform is allocated *before* the capability check;
 * verify the elided "not supported" early-return path frees it, otherwise
 * this leaks when the feature is absent — TODO confirm against full source.
 */
1391 test_compressdev_deflate_stateless_dynamic(void)
1393 struct comp_testsuite_params *ts_params = &testsuite_params;
1396 struct rte_comp_xform *compress_xform =
1397 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1399 const struct rte_compressdev_capabilities *capab;
1401 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1402 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1404 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1407 if (compress_xform == NULL) {
1409 "Compress xform could not be created\n");
/* Clone the default compress xform, then force dynamic Huffman codes */
1414 memcpy(compress_xform, ts_params->def_comp_xform,
1415 sizeof(struct rte_comp_xform));
1416 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1418 struct interim_data_params int_data = {
1423 &ts_params->def_decomp_xform,
1427 struct test_data_params test_data = {
1428 RTE_COMP_OP_STATELESS,
/* Run both directions (PMD vs. Zlib) for every reference buffer */
1435 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1436 int_data.test_bufs = &compress_test_bufs[i];
1437 int_data.buf_idx = &i;
1439 /* Compress with compressdev, decompress with Zlib */
1440 test_data.zlib_dir = ZLIB_DECOMPRESS;
1441 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1446 /* Compress with Zlib, decompress with compressdev */
1447 test_data.zlib_dir = ZLIB_COMPRESS;
1448 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1457 rte_free(compress_xform);
/*
 * Stateless DEFLATE test enqueuing all reference buffers as one batch of
 * operations (multi-op), using the default compress/decompress xforms.
 * Both directions are exercised: PMD-compress / Zlib-decompress and the
 * reverse.
 */
1462 test_compressdev_deflate_stateless_multi_op(void)
1464 struct comp_testsuite_params *ts_params = &testsuite_params;
1465 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1466 uint16_t buf_idx[num_bufs];
/* Identity mapping: buffer i keeps index i (elided assignment body) */
1469 for (i = 0; i < num_bufs; i++)
1472 struct interim_data_params int_data = {
1476 &ts_params->def_comp_xform,
1477 &ts_params->def_decomp_xform,
1481 struct test_data_params test_data = {
1482 RTE_COMP_OP_STATELESS,
1489 /* Compress with compressdev, decompress with Zlib */
1490 test_data.zlib_dir = ZLIB_DECOMPRESS;
1491 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1494 /* Compress with Zlib, decompress with compressdev */
1495 test_data.zlib_dir = ZLIB_COMPRESS;
1496 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1499 return TEST_SUCCESS;
/*
 * Stateless DEFLATE test sweeping every compression level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX for each reference buffer.
 * Only the PMD-compress / Zlib-decompress direction is run per level.
 */
1503 test_compressdev_deflate_stateless_multi_level(void)
1505 struct comp_testsuite_params *ts_params = &testsuite_params;
1509 struct rte_comp_xform *compress_xform =
1510 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1512 if (compress_xform == NULL) {
1514 "Compress xform could not be created\n");
/* Start from the default xform; only the level field is varied below */
1519 memcpy(compress_xform, ts_params->def_comp_xform,
1520 sizeof(struct rte_comp_xform));
1522 struct interim_data_params int_data = {
1527 &ts_params->def_decomp_xform,
1531 struct test_data_params test_data = {
1532 RTE_COMP_OP_STATELESS,
1539 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1540 int_data.test_bufs = &compress_test_bufs[i];
1541 int_data.buf_idx = &i;
/* Inner sweep over all supported compression levels */
1543 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1545 compress_xform->compress.level = level;
1546 /* Compress with compressdev, decompress with Zlib */
1547 test_data.zlib_dir = ZLIB_DECOMPRESS;
1548 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1558 rte_free(compress_xform);
/* Number of distinct xform pairs (and therefore ops) used by the test below */
1562 #define NUM_XFORMS 3
/*
 * Stateless DEFLATE test with multiple xforms in flight at once: NUM_XFORMS
 * compress/decompress xform pairs are created with differing levels
 * (starting at RTE_COMP_LEVEL_MIN; the increment is in an elided line),
 * all operating on the same input buffer, then verified in one batch.
 */
1564 test_compressdev_deflate_stateless_multi_xform(void)
1566 struct comp_testsuite_params *ts_params = &testsuite_params;
1567 uint16_t num_bufs = NUM_XFORMS;
1568 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1569 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1570 const char *test_buffers[NUM_XFORMS];
1572 unsigned int level = RTE_COMP_LEVEL_MIN;
1573 uint16_t buf_idx[num_bufs];
1577 /* Create multiple xforms with various levels */
1578 for (i = 0; i < NUM_XFORMS; i++) {
1579 compress_xforms[i] = rte_malloc(NULL,
1580 sizeof(struct rte_comp_xform), 0);
1581 if (compress_xforms[i] == NULL) {
1583 "Compress xform could not be created\n");
1588 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1589 sizeof(struct rte_comp_xform));
1590 compress_xforms[i]->compress.level = level;
/* Matching decompress xform for each compress xform */
1593 decompress_xforms[i] = rte_malloc(NULL,
1594 sizeof(struct rte_comp_xform), 0);
1595 if (decompress_xforms[i] == NULL) {
1597 "Decompress xform could not be created\n");
1602 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1603 sizeof(struct rte_comp_xform));
1606 for (i = 0; i < NUM_XFORMS; i++) {
1608 /* Use the same buffer in all sessions */
1609 test_buffers[i] = compress_test_bufs[0];
1612 struct interim_data_params int_data = {
1621 struct test_data_params test_data = {
1622 RTE_COMP_OP_STATELESS,
1629 /* Compress with compressdev, decompress with Zlib */
1630 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Cleanup: rte_free(NULL) is a no-op, so partial allocation is safe */
1637 for (i = 0; i < NUM_XFORMS; i++) {
1638 rte_free(compress_xforms[i]);
1639 rte_free(decompress_xforms[i]);
/*
 * Stateless DEFLATE test over scatter-gather (chained-mbuf) buffers.
 * Requires RTE_COMP_FF_OOP_SGL_IN_SGL_OUT; additionally exercises
 * SGL_TO_LB and LB_TO_SGL layouts when the corresponding mixed
 * SGL/linear feature flags are advertised.
 */
1646 test_compressdev_deflate_stateless_sgl(void)
1648 struct comp_testsuite_params *ts_params = &testsuite_params;
1650 const struct rte_compressdev_capabilities *capab;
1652 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1653 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* SGL-in/SGL-out is the baseline requirement; skip (elided) if missing */
1655 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1658 struct interim_data_params int_data = {
1662 &ts_params->def_comp_xform,
1663 &ts_params->def_decomp_xform,
1667 struct test_data_params test_data = {
1668 RTE_COMP_OP_STATELESS,
/* Both directions with chained input and output (SGL_BOTH) */
1675 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1676 int_data.test_bufs = &compress_test_bufs[i];
1677 int_data.buf_idx = &i;
1679 /* Compress with compressdev, decompress with Zlib */
1680 test_data.zlib_dir = ZLIB_DECOMPRESS;
1681 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1684 /* Compress with Zlib, decompress with compressdev */
1685 test_data.zlib_dir = ZLIB_COMPRESS;
1686 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
/* Chained input, linear output — only if the PMD supports it */
1689 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1690 /* Compress with compressdev, decompress with Zlib */
1691 test_data.zlib_dir = ZLIB_DECOMPRESS;
1692 test_data.buff_type = SGL_TO_LB;
1693 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1696 /* Compress with Zlib, decompress with compressdev */
1697 test_data.zlib_dir = ZLIB_COMPRESS;
1698 test_data.buff_type = SGL_TO_LB;
1699 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
/* Linear input, chained output — only if the PMD supports it */
1703 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1704 /* Compress with compressdev, decompress with Zlib */
1705 test_data.zlib_dir = ZLIB_DECOMPRESS;
1706 test_data.buff_type = LB_TO_SGL;
1707 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1710 /* Compress with Zlib, decompress with compressdev */
1711 test_data.zlib_dir = ZLIB_COMPRESS;
1712 test_data.buff_type = LB_TO_SGL;
1713 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1720 return TEST_SUCCESS;
/*
 * Stateless DEFLATE checksum test. Skips unless the device supports at
 * least one of CRC32, Adler32, or combined CRC32+Adler32 checksums; each
 * supported algorithm is then tested in turn. Zlib-generated checksums
 * are compared against the PMD's (ZLIB_COMPRESS direction), and the PMD's
 * compress/decompress checksums are compared against each other
 * (ZLIB_NONE direction).
 */
1725 test_compressdev_deflate_stateless_checksum(void)
1727 struct comp_testsuite_params *ts_params = &testsuite_params;
1730 const struct rte_compressdev_capabilities *capab;
1732 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1733 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1735 /* Check if driver supports any checksum */
1736 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1737 (capab->comp_feature_flags &
1738 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1739 (capab->comp_feature_flags &
1740 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
1743 struct rte_comp_xform *compress_xform =
1744 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1745 if (compress_xform == NULL) {
1746 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1751 memcpy(compress_xform, ts_params->def_comp_xform,
1752 sizeof(struct rte_comp_xform));
1754 struct rte_comp_xform *decompress_xform =
1755 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1756 if (decompress_xform == NULL) {
1757 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
/* Free the already-allocated compress xform on this failure path */
1758 rte_free(compress_xform);
1763 memcpy(decompress_xform, ts_params->def_decomp_xform,
1764 sizeof(struct rte_comp_xform));
1766 struct interim_data_params int_data = {
1775 struct test_data_params test_data = {
1776 RTE_COMP_OP_STATELESS,
1783 /* Check if driver supports crc32 checksum and test */
1784 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1785 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1786 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1788 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1789 /* Compress with compressdev, decompress with Zlib */
1790 int_data.test_bufs = &compress_test_bufs[i];
1791 int_data.buf_idx = &i;
1793 /* Generate zlib checksum and test against selected
1794 * drivers decompression checksum
1796 test_data.zlib_dir = ZLIB_COMPRESS;
1797 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1802 /* Generate compression and decompression
1803 * checksum of selected driver
1805 test_data.zlib_dir = ZLIB_NONE;
1806 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1813 /* Check if driver supports adler32 checksum and test */
1814 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1815 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1816 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1818 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1819 int_data.test_bufs = &compress_test_bufs[i];
1820 int_data.buf_idx = &i;
1822 /* Generate zlib checksum and test against selected
1823 * drivers decompression checksum
1825 test_data.zlib_dir = ZLIB_COMPRESS;
1826 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1830 /* Generate compression and decompression
1831 * checksum of selected driver
1833 test_data.zlib_dir = ZLIB_NONE;
1834 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1841 /* Check if driver supports combined crc and adler checksum and test */
1842 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1843 compress_xform->compress.chksum =
1844 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1845 decompress_xform->decompress.chksum =
1846 RTE_COMP_CHECKSUM_CRC32_ADLER32;
/* Combined checksum: only driver-vs-driver comparison (no zlib ref) */
1848 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1849 int_data.test_bufs = &compress_test_bufs[i];
1850 int_data.buf_idx = &i;
1852 /* Generate compression and decompression
1853 * checksum of selected driver
1855 test_data.zlib_dir = ZLIB_NONE;
1856 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1866 rte_free(compress_xform);
1867 rte_free(decompress_xform);
/*
 * Negative test: destination buffers are deliberately undersized so each
 * operation must complete with RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED
 * (checked inside test_deflate_comp_decomp via the out_of_space flag).
 * Errors logged during the run are therefore expected.
 */
1872 test_compressdev_out_of_space_buffer(void)
1874 struct comp_testsuite_params *ts_params = &testsuite_params;
1877 const struct rte_compressdev_capabilities *capab;
1879 RTE_LOG(ERR, USER1, "This is a negative test errors are expected\n");
1881 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1882 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1884 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1887 struct rte_comp_xform *compress_xform =
1888 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
/* NOTE(review): compress_xform appears unused in the visible lines apart
 * from the final free — confirm against the full source whether it is
 * needed at all. */
1890 if (compress_xform == NULL) {
1892 "Compress xform could not be created\n");
1897 struct interim_data_params int_data = {
1898 &compress_test_bufs[0],
1901 &ts_params->def_comp_xform,
1902 &ts_params->def_decomp_xform,
1906 struct test_data_params test_data = {
1907 RTE_COMP_OP_STATELESS,
1910 1, /* run out-of-space test */
1913 /* Compress with compressdev, decompress with Zlib */
1914 test_data.zlib_dir = ZLIB_DECOMPRESS;
1915 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1920 /* Compress with Zlib, decompress with compressdev */
1921 test_data.zlib_dir = ZLIB_COMPRESS;
1922 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Repeat the out-of-space scenario on chained mbufs when supported */
1927 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1928 /* Compress with compressdev, decompress with Zlib */
1929 test_data.zlib_dir = ZLIB_DECOMPRESS;
1930 test_data.buff_type = SGL_BOTH;
1931 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1936 /* Compress with Zlib, decompress with compressdev */
1937 test_data.zlib_dir = ZLIB_COMPRESS;
1938 test_data.buff_type = SGL_BOTH;
1939 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1948 rte_free(compress_xform);
/*
 * Stateless DEFLATE test on a single large (BIG_DATA_TEST_SIZE) buffer of
 * pseudo-random data, requiring both dynamic Huffman and SGL-in/SGL-out
 * support. The default compress xform is temporarily switched to dynamic
 * Huffman and restored to RTE_COMP_HUFFMAN_DEFAULT before returning.
 */
1953 test_compressdev_deflate_stateless_dynamic_big(void)
1955 struct comp_testsuite_params *ts_params = &testsuite_params;
1957 int ret = TEST_SUCCESS;
1959 const struct rte_compressdev_capabilities *capab;
1960 char *test_buffer = NULL;
1962 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1963 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1965 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1968 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1971 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
1972 if (test_buffer == NULL) {
1974 "Can't allocate buffer for big-data\n");
1978 struct interim_data_params int_data = {
1979 (const char * const *)&test_buffer,
1982 &ts_params->def_comp_xform,
1983 &ts_params->def_decomp_xform,
1987 struct test_data_params test_data = {
1988 RTE_COMP_OP_STATELESS,
/* Temporarily force dynamic Huffman on the shared default xform */
1995 ts_params->def_comp_xform->compress.deflate.huffman =
1996 RTE_COMP_HUFFMAN_DYNAMIC;
1998 /* fill the buffer with data based on rand. data */
/* Fixed seed keeps the "random" payload deterministic across runs;
 * "| 1" guarantees non-zero bytes so the buffer is one C string
 * terminated only by the final NUL written below. */
1999 srand(BIG_DATA_TEST_SIZE);
2000 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2001 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2002 test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
2004 /* Compress with compressdev, decompress with Zlib */
2005 test_data.zlib_dir = ZLIB_DECOMPRESS;
2006 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2011 /* Compress with Zlib, decompress with compressdev */
2012 test_data.zlib_dir = ZLIB_COMPRESS;
2013 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform for subsequent tests */
2019 ts_params->def_comp_xform->compress.deflate.huffman =
2020 RTE_COMP_HUFFMAN_DEFAULT;
2021 rte_free(test_buffer);
/*
 * Test-suite descriptor: registers all stateless DEFLATE unit tests.
 * All cases except the invalid-configuration one share the generic
 * per-test setup/teardown pair.
 */
2026 static struct unit_test_suite compressdev_testsuite = {
2027 .suite_name = "compressdev unit test suite",
2028 .setup = testsuite_setup,
2029 .teardown = testsuite_teardown,
2030 .unit_test_cases = {
/* No setup needed: this case must run against an unconfigured device */
2031 TEST_CASE_ST(NULL, NULL,
2032 test_compressdev_invalid_configuration),
2033 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2034 test_compressdev_deflate_stateless_fixed),
2035 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2036 test_compressdev_deflate_stateless_dynamic),
2037 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2038 test_compressdev_deflate_stateless_dynamic_big),
2039 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2040 test_compressdev_deflate_stateless_multi_op),
2041 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2042 test_compressdev_deflate_stateless_multi_level),
2043 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2044 test_compressdev_deflate_stateless_multi_xform),
2045 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2046 test_compressdev_deflate_stateless_sgl),
2047 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2048 test_compressdev_deflate_stateless_checksum),
2049 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2050 test_compressdev_out_of_space_buffer),
2051 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point invoked by the DPDK test app: runs the whole suite above. */
2056 test_compressdev(void)
2058 return unit_test_suite_runner(&compressdev_testsuite);
/* Exposes the suite as the "compressdev_autotest" test command */
2061 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);