1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
13 #include <rte_compressdev.h>
14 #include <rte_string_fns.h>
16 #include "test_compressdev_test_buffer.h"
19 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
21 #define DEFAULT_WINDOW_SIZE 15
22 #define DEFAULT_MEM_LEVEL 8
23 #define MAX_DEQD_RETRIES 10
24 #define DEQUEUE_WAIT_TIME 10000
27 * 30% extra size for compressed data compared to original data,
28 * in case data size cannot be reduced and it is actually bigger
29 * due to the compress block headers
31 #define COMPRESS_BUF_SIZE_RATIO 1.3
32 #define NUM_LARGE_MBUFS 16
33 #define SMALL_SEG_SIZE 256
36 #define NUM_MAX_XFORMS 16
37 #define NUM_MAX_INFLIGHT_OPS 128
40 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
41 #define ZLIB_HEADER_SIZE 2
42 #define ZLIB_TRAILER_SIZE 4
43 #define GZIP_HEADER_SIZE 10
44 #define GZIP_TRAILER_SIZE 8
46 #define OUT_OF_SPACE_BUF 1
48 #define MAX_MBUF_SEGMENT_SIZE 65535
49 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
50 #define NUM_BIG_MBUFS 4
51 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
/* Human-readable names for the Huffman coding modes, indexed by
 * enum rte_comp_huffman; used only in debug log messages below.
 * (The declarator line with the array's type is not visible in this view.)
 */
54 huffman_type_strings[] = {
55 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
56 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
57 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Source/destination mbuf layout combinations exercised by the tests
 * (LB = linear buffer, SGL = scatter-gather / chained mbuf list).
 */
68 LB_BOTH = 0, /* both input and output are linear*/
69 SGL_BOTH, /* both input and output are chained */
70 SGL_TO_LB, /* input buffer is chained */
71 LB_TO_SGL /* output buffer is chained */
/* Suite-wide state created once in testsuite_setup() and released in
 * testsuite_teardown(): three mbuf pools of different segment sizes,
 * one compress-operation pool, and default compress/decompress xforms.
 */
78 struct comp_testsuite_params {
79 struct rte_mempool *large_mbuf_pool;
80 struct rte_mempool *small_mbuf_pool;
81 struct rte_mempool *big_mbuf_pool;
82 struct rte_mempool *op_pool;
83 struct rte_comp_xform *def_comp_xform;
84 struct rte_comp_xform *def_decomp_xform;
/* Per-test input bundle: the test strings plus the xforms to apply.
 * NOTE(review): test_deflate_comp_decomp() also reads a buf_idx member
 * from this struct; it is declared on a line not visible in this view.
 */
87 struct interim_data_params {
88 const char * const *test_bufs;
89 unsigned int num_bufs;
91 struct rte_comp_xform **compress_xforms;
92 struct rte_comp_xform **decompress_xforms;
93 unsigned int num_xforms;
/* Per-test control knobs: stateless/stateful, buffer layout, which
 * direction (if any) is performed by zlib directly instead of the PMD,
 * and the out-of-space / big-data variants.
 */
96 struct test_data_params {
97 enum rte_comp_op_type state;
98 enum varied_buff buff_type;
99 enum zlib_direction zlib_dir;
100 unsigned int out_of_space;
101 unsigned int big_data;
/* Single global instance shared by setup/teardown and every test case. */
104 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite teardown: release every mempool and the default xforms that
 * testsuite_setup() allocated.
 */
107 testsuite_teardown(void)
109 struct comp_testsuite_params *ts_params = &testsuite_params;
/* A non-zero in-use count here means some test case leaked mbufs/ops;
 * log it before freeing so the leak is attributable.
 */
111 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
112 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
113 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
114 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
115 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
116 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
117 if (rte_mempool_in_use_count(ts_params->op_pool))
118 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() accept NULL, so this is safe even if
 * setup failed part-way through.
 */
120 rte_mempool_free(ts_params->large_mbuf_pool);
121 rte_mempool_free(ts_params->small_mbuf_pool);
122 rte_mempool_free(ts_params->big_mbuf_pool);
123 rte_mempool_free(ts_params->op_pool);
124 rte_free(ts_params->def_comp_xform);
125 rte_free(ts_params->def_decomp_xform);
/*
 * Suite setup: requires at least one compressdev; sizes the large mbuf
 * pool from the longest test string, creates the small/big pools for
 * SGL tests and the op pool, then fills in the default compress and
 * decompress xforms (DEFLATE, no checksum, default window/level).
 * On any failure it falls through to testsuite_teardown() for cleanup.
 */
129 testsuite_setup(void)
131 struct comp_testsuite_params *ts_params = &testsuite_params;
132 uint32_t max_buf_size = 0;
135 if (rte_compressdev_count() == 0) {
136 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
140 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
141 rte_compressdev_name_get(0));
/* +1 keeps room for the NUL terminator, which the tests round-trip. */
143 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
144 max_buf_size = RTE_MAX(max_buf_size,
145 strlen(compress_test_bufs[i]) + 1);
148 * Buffers to be used in compression and decompression.
149 * Since decompressed data might be larger than
150 * compressed data (due to block header),
151 * buffers should be big enough for both cases.
153 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
154 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
157 max_buf_size + RTE_PKTMBUF_HEADROOM,
159 if (ts_params->large_mbuf_pool == NULL) {
160 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
164 /* Create mempool with smaller buffers for SGL testing */
165 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
166 NUM_LARGE_MBUFS * MAX_SEGS,
168 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
170 if (ts_params->small_mbuf_pool == NULL) {
171 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
175 /* Create mempool with big buffers for SGL testing */
176 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
179 MAX_MBUF_SEGMENT_SIZE,
181 if (ts_params->big_mbuf_pool == NULL) {
182 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
186 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
187 0, sizeof(struct priv_op_data),
189 if (ts_params->op_pool == NULL) {
190 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
194 ts_params->def_comp_xform =
195 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
196 if (ts_params->def_comp_xform == NULL) {
198 "Default compress xform could not be created\n");
201 ts_params->def_decomp_xform =
202 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
203 if (ts_params->def_decomp_xform == NULL) {
205 "Default decompress xform could not be created\n");
209 /* Initializes default values for compress/decompress xforms */
210 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): the two ".algo = ..." statements below end with a comma
 * (comma operator) rather than a semicolon. The chained expression is
 * still evaluated correctly, but these should be ';' for clarity.
 */
211 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
212 ts_params->def_comp_xform->compress.deflate.huffman =
213 RTE_COMP_HUFFMAN_DEFAULT;
214 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
215 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
216 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
218 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
219 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
220 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
221 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
226 testsuite_teardown();
/*
 * Per-test setup: configure compress device 0 with a single queue pair,
 * set up that queue pair, and start the device. Each failing step logs
 * an error (the failure-return lines are not visible in this view).
 */
232 generic_ut_setup(void)
234 /* Configure compressdev (one device, one queue pair) */
235 struct rte_compressdev_config config = {
236 .socket_id = rte_socket_id(),
238 .max_nb_priv_xforms = NUM_MAX_XFORMS,
242 if (rte_compressdev_configure(0, &config) < 0) {
243 RTE_LOG(ERR, USER1, "Device configuration failed\n");
247 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
248 rte_socket_id()) < 0) {
249 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
253 if (rte_compressdev_start(0) < 0) {
254 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close compress device 0. */
262 generic_ut_teardown(void)
264 rte_compressdev_stop(0);
265 if (rte_compressdev_close(0) < 0)
266 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: verifies that the compressdev API rejects invalid
 * configurations — zero queue pairs, more queue pairs than the device
 * supports, and queue-pair setup before any queue pairs are configured.
 */
270 test_compressdev_invalid_configuration(void)
272 struct rte_compressdev_config invalid_config;
273 struct rte_compressdev_config valid_config = {
274 .socket_id = rte_socket_id(),
276 .max_nb_priv_xforms = NUM_MAX_XFORMS,
279 struct rte_compressdev_info dev_info;
281 /* Invalid configuration with 0 queue pairs */
282 memcpy(&invalid_config, &valid_config,
283 sizeof(struct rte_compressdev_config));
284 invalid_config.nb_queue_pairs = 0;
286 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
287 "Device configuration was successful "
288 "with no queue pairs (invalid)\n");
291 * Invalid configuration with too many queue pairs
292 * (if there is an actual maximum number of queue pairs)
294 rte_compressdev_info_get(0, &dev_info);
/* max_nb_queue_pairs == 0 means "no limit", so skip that case */
295 if (dev_info.max_nb_queue_pairs != 0) {
296 memcpy(&invalid_config, &valid_config,
297 sizeof(struct rte_compressdev_config));
298 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
300 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
301 "Device configuration was successful "
302 "with too many queue pairs (invalid)\n");
305 /* Invalid queue pair setup, with no number of queue pairs set */
306 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
307 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
308 "Queue pair setup was successful "
309 "with no queue pairs set (invalid)\n");
/*
 * Compares two buffers by length and then by content, logging which
 * aspect differs. Returns a negative value on mismatch; presumably 0
 * on match (the success-return line is not visible in this view).
 */
315 compare_buffers(const char *buffer1, uint32_t buffer1_len,
316 const char *buffer2, uint32_t buffer2_len)
318 if (buffer1_len != buffer2_len) {
319 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
323 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
324 RTE_LOG(ERR, USER1, "Buffers are different\n");
332 * Maps compressdev and Zlib flush flags
/*
 * Translates a compressdev flush flag into the corresponding zlib
 * flush constant (the return statements sit on lines not visible in
 * this view). The default branch is unreachable for valid input.
 */
335 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
338 case RTE_COMP_FLUSH_NONE:
340 case RTE_COMP_FLUSH_SYNC:
342 case RTE_COMP_FLUSH_FULL:
344 case RTE_COMP_FLUSH_FINAL:
347 * There should be only the values above,
348 * so this should never happen
/*
 * Reference compressor: compresses op->m_src into op->m_dst with zlib
 * directly (no PMD), mirroring what a compressdev enqueue/dequeue would
 * produce. Supports chained (multi-segment) source and destination
 * mbufs by staging through temporary linear buffers. On success it
 * fills op->consumed, op->produced, op->status and op->output_chksum.
 *
 * op        - compress operation describing src/dst mbufs and flush flag
 * xform     - compress xform (huffman type, level, checksum, window size)
 * mem_level - zlib memLevel passed to deflateInit2()
 */
356 compress_zlib(struct rte_comp_op *op,
357 const struct rte_comp_xform *xform, int mem_level)
361 int strategy, window_bits, comp_level;
362 int ret = TEST_FAILED;
363 uint8_t *single_src_buf = NULL;
364 uint8_t *single_dst_buf = NULL;
366 /* initialize zlib stream */
367 stream.zalloc = Z_NULL;
368 stream.zfree = Z_NULL;
369 stream.opaque = Z_NULL;
371 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
374 strategy = Z_DEFAULT_STRATEGY;
377 * Window bits is the base two logarithm of the window size (in bytes).
378 * When doing raw DEFLATE, this number will be negative.
380 window_bits = -(xform->compress.window_size);
/* Checksum modes switch zlib to a wrapped stream: ADLER32 uses the
 * zlib wrapper, CRC32 uses the gzip wrapper (windowBits 31 = 15 + 16,
 * per the zlib manual); the resulting header/trailer is stripped below.
 */
381 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
383 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
384 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
386 comp_level = xform->compress.level;
388 if (comp_level != RTE_COMP_LEVEL_NONE)
389 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
390 window_bits, mem_level, strategy);
392 ret = deflateInit(&stream, Z_NO_COMPRESSION);
395 printf("Zlib deflate could not be initialized\n");
399 /* Assuming stateless operation */
/* Chained source: zlib needs contiguous input, so flatten the mbuf
 * chain into a temporary buffer first.
 */
401 if (op->m_src->nb_segs > 1) {
402 single_src_buf = rte_malloc(NULL,
403 rte_pktmbuf_pkt_len(op->m_src), 0);
404 if (single_src_buf == NULL) {
405 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
409 if (rte_pktmbuf_read(op->m_src, op->src.offset,
410 rte_pktmbuf_pkt_len(op->m_src) -
412 single_src_buf) == NULL) {
414 "Buffer could not be read entirely\n");
418 stream.avail_in = op->src.length;
419 stream.next_in = single_src_buf;
422 stream.avail_in = op->src.length;
423 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: compress into a temporary linear buffer, then
 * scatter the output across the destination segments afterwards.
 */
427 if (op->m_dst->nb_segs > 1) {
429 single_dst_buf = rte_malloc(NULL,
430 rte_pktmbuf_pkt_len(op->m_dst), 0);
431 if (single_dst_buf == NULL) {
433 "Buffer could not be allocated\n");
437 stream.avail_out = op->m_dst->pkt_len;
438 stream.next_out = single_dst_buf;
440 } else {/* linear output */
441 stream.avail_out = op->m_dst->data_len;
442 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
446 /* Stateless operation, all buffer will be compressed in one go */
447 zlib_flush = map_zlib_flush_flag(op->flush_flag);
448 ret = deflate(&stream, zlib_flush);
450 if (stream.avail_in != 0) {
451 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
455 if (ret != Z_STREAM_END)
458 /* Copy data to destination SGL */
459 if (op->m_dst->nb_segs > 1) {
460 uint32_t remaining_data = stream.total_out;
461 uint8_t *src_data = single_dst_buf;
462 struct rte_mbuf *dst_buf = op->m_dst;
464 while (remaining_data > 0) {
465 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
466 uint8_t *, op->dst.offset);
/* Last (partial) segment: copy the tail and stop */
468 if (remaining_data < dst_buf->data_len) {
469 memcpy(dst_data, src_data, remaining_data);
472 memcpy(dst_data, src_data, dst_buf->data_len);
473 remaining_data -= dst_buf->data_len;
474 src_data += dst_buf->data_len;
475 dst_buf = dst_buf->next;
480 op->consumed = stream.total_in;
/* Strip the zlib/gzip wrapper so op->produced and the mbuf contain
 * only the raw DEFLATE payload, matching what a PMD would produce.
 */
481 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
482 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
483 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
484 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
486 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
487 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
488 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
489 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
492 op->produced = stream.total_out;
494 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* stream.adler holds Adler-32 or CRC-32 depending on the wrapper used */
495 op->output_chksum = stream.adler;
497 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so unconditional cleanup is safe */
502 rte_free(single_src_buf);
503 rte_free(single_dst_buf);
/*
 * Reference decompressor: inflates op->m_src into op->m_dst with zlib
 * directly (no PMD). Chained mbufs are staged through temporary linear
 * buffers. On success it fills op->consumed, op->produced and
 * op->status.
 */
509 decompress_zlib(struct rte_comp_op *op,
510 const struct rte_comp_xform *xform)
515 int ret = TEST_FAILED;
516 uint8_t *single_src_buf = NULL;
517 uint8_t *single_dst_buf = NULL;
519 /* initialize zlib stream */
520 stream.zalloc = Z_NULL;
521 stream.zfree = Z_NULL;
522 stream.opaque = Z_NULL;
525 * Window bits is the base two logarithm of the window size (in bytes).
526 * When doing raw DEFLATE, this number will be negative.
528 window_bits = -(xform->decompress.window_size);
529 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate path */
532 printf("Zlib deflate could not be initialized\n");
536 /* Assuming stateless operation */
/* NOTE(review): both temporary buffers (and the scatter-out below) are
 * gated on the *source* segment count; presumably input and output are
 * always chained together in the cases exercised — verify for SGL_TO_LB.
 */
538 if (op->m_src->nb_segs > 1) {
539 single_src_buf = rte_malloc(NULL,
540 rte_pktmbuf_pkt_len(op->m_src), 0);
541 if (single_src_buf == NULL) {
542 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
545 single_dst_buf = rte_malloc(NULL,
546 rte_pktmbuf_pkt_len(op->m_dst), 0);
547 if (single_dst_buf == NULL) {
548 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
551 if (rte_pktmbuf_read(op->m_src, 0,
552 rte_pktmbuf_pkt_len(op->m_src),
553 single_src_buf) == NULL) {
555 "Buffer could not be read entirely\n");
559 stream.avail_in = op->src.length;
560 stream.next_in = single_src_buf;
561 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
562 stream.next_out = single_dst_buf;
565 stream.avail_in = op->src.length;
566 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
567 stream.avail_out = op->m_dst->data_len;
568 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
571 /* Stateless operation, all buffer will be compressed in one go */
572 zlib_flush = map_zlib_flush_flag(op->flush_flag);
573 ret = inflate(&stream, zlib_flush);
575 if (stream.avail_in != 0) {
576 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
580 if (ret != Z_STREAM_END)
/* Scatter the inflated data from the staging buffer back into the
 * destination mbuf chain, one segment at a time.
 */
583 if (op->m_src->nb_segs > 1) {
584 uint32_t remaining_data = stream.total_out;
585 uint8_t *src_data = single_dst_buf;
586 struct rte_mbuf *dst_buf = op->m_dst;
588 while (remaining_data > 0) {
589 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
592 if (remaining_data < dst_buf->data_len) {
593 memcpy(dst_data, src_data, remaining_data);
596 memcpy(dst_data, src_data, dst_buf->data_len);
597 remaining_data -= dst_buf->data_len;
598 src_data += dst_buf->data_len;
599 dst_buf = dst_buf->next;
604 op->consumed = stream.total_in;
605 op->produced = stream.total_out;
606 op->status = RTE_COMP_OP_STATUS_SUCCESS;
608 inflateReset(&stream);
/*
 * Builds a scatter-gather mbuf chain rooted at head_buf holding
 * total_data_size bytes split into seg_size segments. If test_buf is
 * non-NULL its contents are copied across the segments; if NULL the
 * segments are only sized (destination-buffer case).
 *
 * test_buf          - source data, or NULL to allocate space only
 * head_buf          - already-allocated first mbuf of the chain
 * total_data_size   - total bytes to spread over the chain
 * small_mbuf_pool   - pool for regular (seg_size) segments
 * large_mbuf_pool   - pool for an oversized final segment
 * limit_segs_in_sgl - cap on chain length; 0 means unlimited
 * (the seg_size parameter is declared on a line not visible here)
 */
618 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
619 uint32_t total_data_size,
620 struct rte_mempool *small_mbuf_pool,
621 struct rte_mempool *large_mbuf_pool,
622 uint8_t limit_segs_in_sgl,
625 uint32_t remaining_data = total_data_size;
626 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
627 struct rte_mempool *pool;
628 struct rte_mbuf *next_seg;
631 const char *data_ptr = test_buf;
/* Cap the chain length; the final segment then has to absorb all the
 * leftover data, hence the large pool fallback in the loop below.
 */
635 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
636 num_remaining_segs = limit_segs_in_sgl - 1;
639 * Allocate data in the first segment (header) and
640 * copy data if test buffer is provided
642 if (remaining_data < seg_size)
643 data_size = remaining_data;
645 data_size = seg_size;
646 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
647 if (buf_ptr == NULL) {
649 "Not enough space in the 1st buffer\n");
653 if (data_ptr != NULL) {
654 /* Copy characters without NULL terminator */
655 strncpy(buf_ptr, data_ptr, data_size);
656 data_ptr += data_size;
658 remaining_data -= data_size;
659 num_remaining_segs--;
662 * Allocate the rest of the segments,
663 * copy the rest of the data and chain the segments.
665 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment takes all remaining data; pick the large pool when it
 * no longer fits in a small segment (capped-chain case).
 */
667 if (i == (num_remaining_segs - 1)) {
669 if (remaining_data > seg_size)
670 pool = large_mbuf_pool;
672 pool = small_mbuf_pool;
673 data_size = remaining_data;
675 data_size = seg_size;
676 pool = small_mbuf_pool;
679 next_seg = rte_pktmbuf_alloc(pool);
680 if (next_seg == NULL) {
682 "New segment could not be allocated "
683 "from the mempool\n");
686 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
687 if (buf_ptr == NULL) {
689 "Not enough space in the buffer\n");
690 rte_pktmbuf_free(next_seg);
693 if (data_ptr != NULL) {
694 /* Copy characters without NULL terminator */
695 strncpy(buf_ptr, data_ptr, data_size);
696 data_ptr += data_size;
698 remaining_data -= data_size;
700 ret = rte_pktmbuf_chain(head_buf, next_seg);
702 rte_pktmbuf_free(next_seg);
/* NOTE(review): message has a typo — "could not [be] chained" */
704 "Segment could not chained\n");
713 * Compresses and decompresses buffer with compressdev API and Zlib API
/*
 * Core round-trip test: compresses every test buffer and decompresses
 * the result, with each direction performed either by the compressdev
 * PMD (device 0, queue pair 0) or by zlib directly, as selected by
 * test_data->zlib_dir. Verifies statuses (including the expected
 * OUT_OF_SPACE_TERMINATED for out-of-space variants), compares the
 * decompressed data against the original, and cross-checks checksums
 * when the xform enables them. Returns a negative value on failure
 * (via cleanup paths on lines not visible in this view).
 */
716 test_deflate_comp_decomp(const struct interim_data_params *int_data,
717 const struct test_data_params *test_data)
719 struct comp_testsuite_params *ts_params = &testsuite_params;
720 const char * const *test_bufs = int_data->test_bufs;
721 unsigned int num_bufs = int_data->num_bufs;
722 uint16_t *buf_idx = int_data->buf_idx;
723 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
724 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
725 unsigned int num_xforms = int_data->num_xforms;
726 enum rte_comp_op_type state = test_data->state;
727 unsigned int buff_type = test_data->buff_type;
728 unsigned int out_of_space = test_data->out_of_space;
729 unsigned int big_data = test_data->big_data;
730 enum zlib_direction zlib_dir = test_data->zlib_dir;
733 struct rte_mbuf *uncomp_bufs[num_bufs];
734 struct rte_mbuf *comp_bufs[num_bufs];
735 struct rte_comp_op *ops[num_bufs];
736 struct rte_comp_op *ops_processed[num_bufs];
737 void *priv_xforms[num_bufs];
738 uint16_t num_enqd, num_deqd, num_total_deqd;
739 uint16_t num_priv_xforms = 0;
740 unsigned int deqd_retries = 0;
741 struct priv_op_data *priv_data;
744 struct rte_mempool *buf_pool;
746 /* Compressing with CompressDev */
747 unsigned int oos_zlib_decompress =
748 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
749 /* Decompressing with CompressDev */
750 unsigned int oos_zlib_compress =
751 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
752 const struct rte_compressdev_capabilities *capa =
753 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
754 char *contig_buf = NULL;
755 uint64_t compress_checksum[num_bufs];
757 /* Initialize all arrays to NULL */
758 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
759 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
760 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
761 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
762 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* Pick the mbuf pool matching the requested buffer layout */
765 buf_pool = ts_params->big_mbuf_pool;
766 else if (buff_type == SGL_BOTH)
767 buf_pool = ts_params->small_mbuf_pool;
769 buf_pool = ts_params->large_mbuf_pool;
771 /* Prepare the source mbufs with the data */
772 ret = rte_pktmbuf_alloc_bulk(buf_pool,
773 uncomp_bufs, num_bufs);
776 "Source mbufs could not be allocated "
777 "from the mempool\n");
781 if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
782 for (i = 0; i < num_bufs; i++) {
783 data_size = strlen(test_bufs[i]) + 1;
784 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
786 big_data ? buf_pool : ts_params->small_mbuf_pool,
787 big_data ? buf_pool : ts_params->large_mbuf_pool,
788 big_data ? 0 : MAX_SEGS,
789 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
793 for (i = 0; i < num_bufs; i++) {
794 data_size = strlen(test_bufs[i]) + 1;
795 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
796 strlcpy(buf_ptr, test_bufs[i], data_size);
800 /* Prepare the destination mbufs */
801 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
804 "Destination mbufs could not be allocated "
805 "from the mempool\n");
/* Out-of-space variant: deliberately undersize the destination so the
 * compress stage must fail with OUT_OF_SPACE_TERMINATED.
 */
809 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
810 for (i = 0; i < num_bufs; i++) {
811 if (out_of_space == 1 && oos_zlib_decompress)
812 data_size = OUT_OF_SPACE_BUF;
814 (data_size = strlen(test_bufs[i]) *
815 COMPRESS_BUF_SIZE_RATIO);
817 if (prepare_sgl_bufs(NULL, comp_bufs[i],
819 big_data ? buf_pool : ts_params->small_mbuf_pool,
820 big_data ? buf_pool : ts_params->large_mbuf_pool,
821 big_data ? 0 : MAX_SEGS,
822 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
828 for (i = 0; i < num_bufs; i++) {
829 if (out_of_space == 1 && oos_zlib_decompress)
830 data_size = OUT_OF_SPACE_BUF;
832 (data_size = strlen(test_bufs[i]) *
833 COMPRESS_BUF_SIZE_RATIO);
835 rte_pktmbuf_append(comp_bufs[i], data_size);
839 /* Build the compression operations */
840 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
843 "Compress operations could not be allocated "
844 "from the mempool\n");
849 for (i = 0; i < num_bufs; i++) {
850 ops[i]->m_src = uncomp_bufs[i];
851 ops[i]->m_dst = comp_bufs[i];
852 ops[i]->src.offset = 0;
853 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
854 ops[i]->dst.offset = 0;
855 if (state == RTE_COMP_OP_STATELESS) {
856 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
859 "Stateful operations are not supported "
860 "in these tests yet\n");
863 ops[i]->input_chksum = 0;
865 * Store original operation index in private data,
866 * since ordering does not have to be maintained,
867 * when dequeueing from compressdev, so a comparison
868 * at the end of the test can be done.
870 priv_data = (struct priv_op_data *) (ops[i] + 1);
871 priv_data->orig_idx = i;
874 /* Compress data (either with Zlib API or compressdev API */
875 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
876 for (i = 0; i < num_bufs; i++) {
877 const struct rte_comp_xform *compress_xform =
878 compress_xforms[i % num_xforms];
879 ret = compress_zlib(ops[i], compress_xform,
/* zlib path is synchronous: "processed" op is the op itself */
884 ops_processed[i] = ops[i];
887 /* Create compress private xform data */
888 for (i = 0; i < num_xforms; i++) {
889 ret = rte_compressdev_private_xform_create(0,
890 (const struct rte_comp_xform *)compress_xforms[i],
894 "Compression private xform "
895 "could not be created\n");
/* Shareable xforms can be attached to many ops; otherwise one
 * private xform must be created per op.
 */
901 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
902 /* Attach shareable private xform data to ops */
903 for (i = 0; i < num_bufs; i++)
904 ops[i]->private_xform = priv_xforms[i % num_xforms];
906 /* Create rest of the private xforms for the other ops */
907 for (i = num_xforms; i < num_bufs; i++) {
908 ret = rte_compressdev_private_xform_create(0,
909 compress_xforms[i % num_xforms],
913 "Compression private xform "
914 "could not be created\n");
920 /* Attach non shareable private xform data to ops */
921 for (i = 0; i < num_bufs; i++)
922 ops[i]->private_xform = priv_xforms[i];
925 /* Enqueue and dequeue all operations */
926 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
927 if (num_enqd < num_bufs) {
929 "The operations could not be enqueued\n");
936 * If retrying a dequeue call, wait for 10 ms to allow
937 * enough time to the driver to process the operations
939 if (deqd_retries != 0) {
941 * Avoid infinite loop if not all the
942 * operations get out of the device
944 if (deqd_retries == MAX_DEQD_RETRIES) {
946 "Not all operations could be "
950 usleep(DEQUEUE_WAIT_TIME);
952 num_deqd = rte_compressdev_dequeue_burst(0, 0,
953 &ops_processed[num_total_deqd], num_bufs);
954 num_total_deqd += num_deqd;
957 } while (num_total_deqd < num_enqd);
961 /* Free compress private xforms */
962 for (i = 0; i < num_priv_xforms; i++) {
963 rte_compressdev_private_xform_free(0, priv_xforms[i]);
964 priv_xforms[i] = NULL;
969 for (i = 0; i < num_bufs; i++) {
970 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
971 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
972 const struct rte_comp_compress_xform *compress_xform =
973 &compress_xforms[xform_idx]->compress;
974 enum rte_comp_huffman huffman_type =
975 compress_xform->deflate.huffman;
976 char engine[] = "zlib (directly, not PMD)";
/* BUG(review): '!=' joined with '||' is always true, so the engine
 * label is always overwritten with "PMD" even on the zlib path.
 * Should be '&&' (only a debug-log defect; fixed upstream in DPDK).
 */
977 if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
978 strlcpy(engine, "PMD", sizeof(engine));
980 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
981 " %u bytes (level = %d, huffman = %s)\n",
982 buf_idx[priv_data->orig_idx], engine,
983 ops_processed[i]->consumed, ops_processed[i]->produced,
984 compress_xform->level,
985 huffman_type_strings[huffman_type]);
986 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
987 ops_processed[i]->consumed == 0 ? 0 :
988 (float)ops_processed[i]->produced /
989 ops_processed[i]->consumed * 100);
/* NOTE(review): indexed by dequeue position i, not by orig_idx, while
 * the comment above states dequeue order is not guaranteed; the later
 * checksum comparison also uses dequeue position — verify ordering.
 */
990 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
991 compress_checksum[i] = ops_processed[i]->output_chksum;
996 * Check operation status and free source mbufs (destination mbuf and
997 * compress operation information is needed for the decompression stage)
999 for (i = 0; i < num_bufs; i++) {
1000 if (out_of_space && oos_zlib_decompress) {
1001 if (ops_processed[i]->status !=
1002 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1006 "Operation without expected out of "
1007 "space status error\n");
1013 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1015 "Some operations were not successful\n");
1018 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1019 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1020 uncomp_bufs[priv_data->orig_idx] = NULL;
/* Out-of-space compress test ends here: the expected failure was seen */
1023 if (out_of_space && oos_zlib_decompress) {
1028 /* Allocate buffers for decompressed data */
1029 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1032 "Destination mbufs could not be allocated "
1033 "from the mempool\n");
1037 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1038 for (i = 0; i < num_bufs; i++) {
1039 priv_data = (struct priv_op_data *)
1040 (ops_processed[i] + 1);
1041 if (out_of_space == 1 && oos_zlib_compress)
1042 data_size = OUT_OF_SPACE_BUF;
1045 strlen(test_bufs[priv_data->orig_idx]) + 1;
1047 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1049 big_data ? buf_pool : ts_params->small_mbuf_pool,
1050 big_data ? buf_pool : ts_params->large_mbuf_pool,
1051 big_data ? 0 : MAX_SEGS,
1052 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1058 for (i = 0; i < num_bufs; i++) {
1059 priv_data = (struct priv_op_data *)
1060 (ops_processed[i] + 1);
1061 if (out_of_space == 1 && oos_zlib_compress)
1062 data_size = OUT_OF_SPACE_BUF;
1065 strlen(test_bufs[priv_data->orig_idx]) + 1;
1067 rte_pktmbuf_append(uncomp_bufs[i], data_size);
1071 /* Build the decompression operations */
1072 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1075 "Decompress operations could not be allocated "
1076 "from the mempool\n");
1080 /* Source buffer is the compressed data from the previous operations */
1081 for (i = 0; i < num_bufs; i++) {
1082 ops[i]->m_src = ops_processed[i]->m_dst;
1083 ops[i]->m_dst = uncomp_bufs[i];
1084 ops[i]->src.offset = 0;
1086 * Set the length of the compressed data to the
1087 * number of bytes that were produced in the previous stage
1089 ops[i]->src.length = ops_processed[i]->produced;
1090 ops[i]->dst.offset = 0;
1091 if (state == RTE_COMP_OP_STATELESS) {
1092 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1095 "Stateful operations are not supported "
1096 "in these tests yet\n");
1099 ops[i]->input_chksum = 0;
1101 * Copy private data from previous operations,
1102 * to keep the pointer to the original buffer
1104 memcpy(ops[i] + 1, ops_processed[i] + 1,
1105 sizeof(struct priv_op_data));
1109 * Free the previous compress operations,
1110 * as they are not needed anymore
1112 rte_comp_op_bulk_free(ops_processed, num_bufs);
1114 /* Decompress data (either with Zlib API or compressdev API */
1115 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1116 for (i = 0; i < num_bufs; i++) {
1117 priv_data = (struct priv_op_data *)(ops[i] + 1);
1118 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1119 const struct rte_comp_xform *decompress_xform =
1120 decompress_xforms[xform_idx];
1122 ret = decompress_zlib(ops[i], decompress_xform);
1126 ops_processed[i] = ops[i];
1129 /* Create decompress private xform data */
1130 for (i = 0; i < num_xforms; i++) {
1131 ret = rte_compressdev_private_xform_create(0,
1132 (const struct rte_comp_xform *)decompress_xforms[i],
1136 "Decompression private xform "
1137 "could not be created\n");
1143 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1144 /* Attach shareable private xform data to ops */
1145 for (i = 0; i < num_bufs; i++) {
1146 priv_data = (struct priv_op_data *)(ops[i] + 1);
1147 uint16_t xform_idx = priv_data->orig_idx %
1149 ops[i]->private_xform = priv_xforms[xform_idx];
1152 /* Create rest of the private xforms for the other ops */
1153 for (i = num_xforms; i < num_bufs; i++) {
1154 ret = rte_compressdev_private_xform_create(0,
1155 decompress_xforms[i % num_xforms],
1159 "Decompression private xform "
1160 "could not be created\n");
1166 /* Attach non shareable private xform data to ops */
1167 for (i = 0; i < num_bufs; i++) {
1168 priv_data = (struct priv_op_data *)(ops[i] + 1);
1169 uint16_t xform_idx = priv_data->orig_idx;
1170 ops[i]->private_xform = priv_xforms[xform_idx];
1174 /* Enqueue and dequeue all operations */
1175 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1176 if (num_enqd < num_bufs) {
1178 "The operations could not be enqueued\n");
1185 * If retrying a dequeue call, wait for 10 ms to allow
1186 * enough time to the driver to process the operations
1188 if (deqd_retries != 0) {
1190 * Avoid infinite loop if not all the
1191 * operations get out of the device
1193 if (deqd_retries == MAX_DEQD_RETRIES) {
1195 "Not all operations could be "
1199 usleep(DEQUEUE_WAIT_TIME);
1201 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1202 &ops_processed[num_total_deqd], num_bufs);
1203 num_total_deqd += num_deqd;
1205 } while (num_total_deqd < num_enqd);
1210 for (i = 0; i < num_bufs; i++) {
1211 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1212 char engine[] = "zlib, (directly, no PMD)";
/* BUG(review): same always-true condition as in the compress stage —
 * should be '&&'; the engine label is always forced to "pmd".
 */
1213 if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1214 strlcpy(engine, "pmd", sizeof(engine));
1215 RTE_LOG(DEBUG, USER1,
1216 "Buffer %u decompressed by %s from %u to %u bytes\n",
1217 buf_idx[priv_data->orig_idx], engine,
1218 ops_processed[i]->consumed, ops_processed[i]->produced);
1223 * Check operation status and free source mbuf (destination mbuf and
1224 * compress operation information is still needed)
1226 for (i = 0; i < num_bufs; i++) {
1227 if (out_of_space && oos_zlib_compress) {
1228 if (ops_processed[i]->status !=
1229 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1233 "Operation without expected out of "
1234 "space status error\n");
1240 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1242 "Some operations were not successful\n");
1245 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1246 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1247 comp_bufs[priv_data->orig_idx] = NULL;
1250 if (out_of_space && oos_zlib_compress) {
1256 * Compare the original stream with the decompressed stream
1257 * (in size and the data)
1259 for (i = 0; i < num_bufs; i++) {
1260 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1261 const char *buf1 = test_bufs[priv_data->orig_idx];
/* Flatten a possibly-chained destination into one buffer to compare */
1263 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1264 if (contig_buf == NULL) {
1265 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1270 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1271 ops_processed[i]->produced, contig_buf);
1272 if (compare_buffers(buf1, strlen(buf1) + 1,
1273 buf2, ops_processed[i]->produced) < 0)
1276 /* Test checksums */
1277 if (compress_xforms[0]->compress.chksum !=
1278 RTE_COMP_CHECKSUM_NONE) {
1279 if (ops_processed[i]->output_chksum !=
1280 compress_checksum[i]) {
1281 RTE_LOG(ERR, USER1, "The checksums differ\n"
1282 "Compression Checksum: %" PRIu64 "\tDecompression "
1283 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1284 ops_processed[i]->output_chksum);
1289 rte_free(contig_buf);
1296 /* Free resources */
1297 for (i = 0; i < num_bufs; i++) {
1298 rte_pktmbuf_free(uncomp_bufs[i]);
1299 rte_pktmbuf_free(comp_bufs[i]);
1300 rte_comp_op_free(ops[i]);
1301 rte_comp_op_free(ops_processed[i]);
1303 for (i = 0; i < num_priv_xforms; i++) {
1304 if (priv_xforms[i] != NULL)
1305 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1307 rte_free(contig_buf);
1313 test_compressdev_deflate_stateless_fixed(void)
1315 struct comp_testsuite_params *ts_params = &testsuite_params;
1318 const struct rte_compressdev_capabilities *capab;
1320 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1321 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1323 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1326 struct rte_comp_xform *compress_xform =
1327 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1329 if (compress_xform == NULL) {
1331 "Compress xform could not be created\n");
1336 memcpy(compress_xform, ts_params->def_comp_xform,
1337 sizeof(struct rte_comp_xform));
1338 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1340 struct interim_data_params int_data = {
1345 &ts_params->def_decomp_xform,
1349 struct test_data_params test_data = {
1350 RTE_COMP_OP_STATELESS,
1357 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1358 int_data.test_bufs = &compress_test_bufs[i];
1359 int_data.buf_idx = &i;
1361 /* Compress with compressdev, decompress with Zlib */
1362 test_data.zlib_dir = ZLIB_DECOMPRESS;
1363 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1368 /* Compress with Zlib, decompress with compressdev */
1369 test_data.zlib_dir = ZLIB_COMPRESS;
1370 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1379 rte_free(compress_xform);
1384 test_compressdev_deflate_stateless_dynamic(void)
1386 struct comp_testsuite_params *ts_params = &testsuite_params;
1389 struct rte_comp_xform *compress_xform =
1390 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1392 const struct rte_compressdev_capabilities *capab;
1394 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1395 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1397 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1400 if (compress_xform == NULL) {
1402 "Compress xform could not be created\n");
1407 memcpy(compress_xform, ts_params->def_comp_xform,
1408 sizeof(struct rte_comp_xform));
1409 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1411 struct interim_data_params int_data = {
1416 &ts_params->def_decomp_xform,
1420 struct test_data_params test_data = {
1421 RTE_COMP_OP_STATELESS,
1428 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1429 int_data.test_bufs = &compress_test_bufs[i];
1430 int_data.buf_idx = &i;
1432 /* Compress with compressdev, decompress with Zlib */
1433 test_data.zlib_dir = ZLIB_DECOMPRESS;
1434 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1439 /* Compress with Zlib, decompress with compressdev */
1440 test_data.zlib_dir = ZLIB_COMPRESS;
1441 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1450 rte_free(compress_xform);
1455 test_compressdev_deflate_stateless_multi_op(void)
1457 struct comp_testsuite_params *ts_params = &testsuite_params;
1458 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1459 uint16_t buf_idx[num_bufs];
1462 for (i = 0; i < num_bufs; i++)
1465 struct interim_data_params int_data = {
1469 &ts_params->def_comp_xform,
1470 &ts_params->def_decomp_xform,
1474 struct test_data_params test_data = {
1475 RTE_COMP_OP_STATELESS,
1482 /* Compress with compressdev, decompress with Zlib */
1483 test_data.zlib_dir = ZLIB_DECOMPRESS;
1484 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1487 /* Compress with Zlib, decompress with compressdev */
1488 test_data.zlib_dir = ZLIB_COMPRESS;
1489 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1492 return TEST_SUCCESS;
1496 test_compressdev_deflate_stateless_multi_level(void)
1498 struct comp_testsuite_params *ts_params = &testsuite_params;
1502 struct rte_comp_xform *compress_xform =
1503 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1505 if (compress_xform == NULL) {
1507 "Compress xform could not be created\n");
1512 memcpy(compress_xform, ts_params->def_comp_xform,
1513 sizeof(struct rte_comp_xform));
1515 struct interim_data_params int_data = {
1520 &ts_params->def_decomp_xform,
1524 struct test_data_params test_data = {
1525 RTE_COMP_OP_STATELESS,
1532 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1533 int_data.test_bufs = &compress_test_bufs[i];
1534 int_data.buf_idx = &i;
1536 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1538 compress_xform->compress.level = level;
1539 /* Compress with compressdev, decompress with Zlib */
1540 test_data.zlib_dir = ZLIB_DECOMPRESS;
1541 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1551 rte_free(compress_xform);
1555 #define NUM_XFORMS 3
1557 test_compressdev_deflate_stateless_multi_xform(void)
1559 struct comp_testsuite_params *ts_params = &testsuite_params;
1560 uint16_t num_bufs = NUM_XFORMS;
1561 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1562 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1563 const char *test_buffers[NUM_XFORMS];
1565 unsigned int level = RTE_COMP_LEVEL_MIN;
1566 uint16_t buf_idx[num_bufs];
1570 /* Create multiple xforms with various levels */
1571 for (i = 0; i < NUM_XFORMS; i++) {
1572 compress_xforms[i] = rte_malloc(NULL,
1573 sizeof(struct rte_comp_xform), 0);
1574 if (compress_xforms[i] == NULL) {
1576 "Compress xform could not be created\n");
1581 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1582 sizeof(struct rte_comp_xform));
1583 compress_xforms[i]->compress.level = level;
1586 decompress_xforms[i] = rte_malloc(NULL,
1587 sizeof(struct rte_comp_xform), 0);
1588 if (decompress_xforms[i] == NULL) {
1590 "Decompress xform could not be created\n");
1595 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1596 sizeof(struct rte_comp_xform));
1599 for (i = 0; i < NUM_XFORMS; i++) {
1601 /* Use the same buffer in all sessions */
1602 test_buffers[i] = compress_test_bufs[0];
1605 struct interim_data_params int_data = {
1614 struct test_data_params test_data = {
1615 RTE_COMP_OP_STATELESS,
1622 /* Compress with compressdev, decompress with Zlib */
1623 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1630 for (i = 0; i < NUM_XFORMS; i++) {
1631 rte_free(compress_xforms[i]);
1632 rte_free(decompress_xforms[i]);
1639 test_compressdev_deflate_stateless_sgl(void)
1641 struct comp_testsuite_params *ts_params = &testsuite_params;
1643 const struct rte_compressdev_capabilities *capab;
1645 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1646 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1648 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1651 struct interim_data_params int_data = {
1655 &ts_params->def_comp_xform,
1656 &ts_params->def_decomp_xform,
1660 struct test_data_params test_data = {
1661 RTE_COMP_OP_STATELESS,
1668 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1669 int_data.test_bufs = &compress_test_bufs[i];
1670 int_data.buf_idx = &i;
1672 /* Compress with compressdev, decompress with Zlib */
1673 test_data.zlib_dir = ZLIB_DECOMPRESS;
1674 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1677 /* Compress with Zlib, decompress with compressdev */
1678 test_data.zlib_dir = ZLIB_COMPRESS;
1679 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1682 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1683 /* Compress with compressdev, decompress with Zlib */
1684 test_data.zlib_dir = ZLIB_DECOMPRESS;
1685 test_data.buff_type = SGL_TO_LB;
1686 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1689 /* Compress with Zlib, decompress with compressdev */
1690 test_data.zlib_dir = ZLIB_COMPRESS;
1691 test_data.buff_type = SGL_TO_LB;
1692 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1696 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1697 /* Compress with compressdev, decompress with Zlib */
1698 test_data.zlib_dir = ZLIB_DECOMPRESS;
1699 test_data.buff_type = LB_TO_SGL;
1700 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1703 /* Compress with Zlib, decompress with compressdev */
1704 test_data.zlib_dir = ZLIB_COMPRESS;
1705 test_data.buff_type = LB_TO_SGL;
1706 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1713 return TEST_SUCCESS;
1718 test_compressdev_deflate_stateless_checksum(void)
1720 struct comp_testsuite_params *ts_params = &testsuite_params;
1723 const struct rte_compressdev_capabilities *capab;
1725 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1726 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1728 /* Check if driver supports any checksum */
1729 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1730 (capab->comp_feature_flags &
1731 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1732 (capab->comp_feature_flags &
1733 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
1736 struct rte_comp_xform *compress_xform =
1737 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1738 if (compress_xform == NULL) {
1739 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1744 memcpy(compress_xform, ts_params->def_comp_xform,
1745 sizeof(struct rte_comp_xform));
1747 struct rte_comp_xform *decompress_xform =
1748 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1749 if (decompress_xform == NULL) {
1750 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1751 rte_free(compress_xform);
1756 memcpy(decompress_xform, ts_params->def_decomp_xform,
1757 sizeof(struct rte_comp_xform));
1759 struct interim_data_params int_data = {
1768 struct test_data_params test_data = {
1769 RTE_COMP_OP_STATELESS,
1776 /* Check if driver supports crc32 checksum and test */
1777 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1778 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1779 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1781 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1782 /* Compress with compressdev, decompress with Zlib */
1783 int_data.test_bufs = &compress_test_bufs[i];
1784 int_data.buf_idx = &i;
1786 /* Generate zlib checksum and test against selected
1787 * drivers decompression checksum
1789 test_data.zlib_dir = ZLIB_COMPRESS;
1790 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1795 /* Generate compression and decompression
1796 * checksum of selected driver
1798 test_data.zlib_dir = ZLIB_NONE;
1799 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1806 /* Check if driver supports adler32 checksum and test */
1807 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1808 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1809 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1811 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1812 int_data.test_bufs = &compress_test_bufs[i];
1813 int_data.buf_idx = &i;
1815 /* Generate zlib checksum and test against selected
1816 * drivers decompression checksum
1818 test_data.zlib_dir = ZLIB_COMPRESS;
1819 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1823 /* Generate compression and decompression
1824 * checksum of selected driver
1826 test_data.zlib_dir = ZLIB_NONE;
1827 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1834 /* Check if driver supports combined crc and adler checksum and test */
1835 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1836 compress_xform->compress.chksum =
1837 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1838 decompress_xform->decompress.chksum =
1839 RTE_COMP_CHECKSUM_CRC32_ADLER32;
1841 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1842 int_data.test_bufs = &compress_test_bufs[i];
1843 int_data.buf_idx = &i;
1845 /* Generate compression and decompression
1846 * checksum of selected driver
1848 test_data.zlib_dir = ZLIB_NONE;
1849 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1859 rte_free(compress_xform);
1860 rte_free(decompress_xform);
1865 test_compressdev_out_of_space_buffer(void)
1867 struct comp_testsuite_params *ts_params = &testsuite_params;
1870 const struct rte_compressdev_capabilities *capab;
1872 RTE_LOG(INFO, USER1, "This is a negative test errors are expected\n");
1874 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1875 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1877 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1880 struct rte_comp_xform *compress_xform =
1881 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1883 if (compress_xform == NULL) {
1885 "Compress xform could not be created\n");
1890 struct interim_data_params int_data = {
1891 &compress_test_bufs[0],
1894 &ts_params->def_comp_xform,
1895 &ts_params->def_decomp_xform,
1899 struct test_data_params test_data = {
1900 RTE_COMP_OP_STATELESS,
1906 /* Compress with compressdev, decompress with Zlib */
1907 test_data.zlib_dir = ZLIB_DECOMPRESS;
1908 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1913 /* Compress with Zlib, decompress with compressdev */
1914 test_data.zlib_dir = ZLIB_COMPRESS;
1915 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1920 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1921 /* Compress with compressdev, decompress with Zlib */
1922 test_data.zlib_dir = ZLIB_DECOMPRESS;
1923 test_data.buff_type = SGL_BOTH;
1924 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1929 /* Compress with Zlib, decompress with compressdev */
1930 test_data.zlib_dir = ZLIB_COMPRESS;
1931 test_data.buff_type = SGL_BOTH;
1932 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1941 rte_free(compress_xform);
1946 test_compressdev_deflate_stateless_dynamic_big(void)
1948 struct comp_testsuite_params *ts_params = &testsuite_params;
1950 int ret = TEST_SUCCESS;
1952 const struct rte_compressdev_capabilities *capab;
1953 char *test_buffer = NULL;
1955 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1956 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1958 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1961 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1964 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
1965 if (test_buffer == NULL) {
1967 "Can't allocate buffer for big-data\n");
1971 struct interim_data_params int_data = {
1972 (const char * const *)&test_buffer,
1975 &ts_params->def_comp_xform,
1976 &ts_params->def_decomp_xform,
1980 struct test_data_params test_data = {
1981 RTE_COMP_OP_STATELESS,
1988 ts_params->def_comp_xform->compress.deflate.huffman =
1989 RTE_COMP_HUFFMAN_DYNAMIC;
1991 /* fill the buffer with data based on rand. data */
1992 srand(BIG_DATA_TEST_SIZE);
1993 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
1994 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
1995 test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
1997 /* Compress with compressdev, decompress with Zlib */
1998 test_data.zlib_dir = ZLIB_DECOMPRESS;
1999 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2004 /* Compress with Zlib, decompress with compressdev */
2005 test_data.zlib_dir = ZLIB_COMPRESS;
2006 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2012 ts_params->def_comp_xform->compress.deflate.huffman =
2013 RTE_COMP_HUFFMAN_DEFAULT;
2014 rte_free(test_buffer);
2019 static struct unit_test_suite compressdev_testsuite = {
2020 .suite_name = "compressdev unit test suite",
2021 .setup = testsuite_setup,
2022 .teardown = testsuite_teardown,
2023 .unit_test_cases = {
2024 TEST_CASE_ST(NULL, NULL,
2025 test_compressdev_invalid_configuration),
2026 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2027 test_compressdev_deflate_stateless_fixed),
2028 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2029 test_compressdev_deflate_stateless_dynamic),
2030 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2031 test_compressdev_deflate_stateless_dynamic_big),
2032 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2033 test_compressdev_deflate_stateless_multi_op),
2034 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2035 test_compressdev_deflate_stateless_multi_level),
2036 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2037 test_compressdev_deflate_stateless_multi_xform),
2038 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2039 test_compressdev_deflate_stateless_sgl),
2040 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2041 test_compressdev_deflate_stateless_checksum),
2042 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2043 test_compressdev_out_of_space_buffer),
2044 TEST_CASES_END() /**< NULL terminate unit test array */
2049 test_compressdev(void)
2051 return unit_test_suite_runner(&compressdev_testsuite);
2054 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);