1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
11 #include <rte_cycles.h>
12 #include <rte_malloc.h>
13 #include <rte_mempool.h>
15 #include <rte_compressdev.h>
16 #include <rte_string_fns.h>
18 #include "test_compressdev_test_buffer.h"
21 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
23 #define DEFAULT_WINDOW_SIZE 15
24 #define DEFAULT_MEM_LEVEL 8
25 #define MAX_DEQD_RETRIES 10
26 #define DEQUEUE_WAIT_TIME 10000
29 * 30% extra size for compressed data compared to original data,
30 * in case data size cannot be reduced and it is actually bigger
31 * due to the compress block headers
33 #define COMPRESS_BUF_SIZE_RATIO 1.3
34 #define COMPRESS_BUF_SIZE_RATIO_DISABLED 1.0
35 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2
36 #define NUM_LARGE_MBUFS 16
37 #define SMALL_SEG_SIZE 256
40 #define NUM_MAX_XFORMS 16
41 #define NUM_MAX_INFLIGHT_OPS 128
44 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
45 #define ZLIB_HEADER_SIZE 2
46 #define ZLIB_TRAILER_SIZE 4
47 #define GZIP_HEADER_SIZE 10
48 #define GZIP_TRAILER_SIZE 8
50 #define OUT_OF_SPACE_BUF 1
52 #define MAX_MBUF_SEGMENT_SIZE 65535
53 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
54 #define NUM_BIG_MBUFS (512 + 1)
55 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * 2)
57 /* constants for "im buffer" tests start here */
59 /* number of mbufs lower than number of inflight ops */
60 #define IM_BUF_NUM_MBUFS 3
61 /* above threshold (QAT_FALLBACK_THLD) and below max mbuf size */
62 #define IM_BUF_DATA_TEST_SIZE_LB 59600
63 /* data size smaller than the queue capacity */
64 #define IM_BUF_DATA_TEST_SIZE_SGL (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS)
65 /* number of mbufs bigger than number of inflight ops */
66 #define IM_BUF_NUM_MBUFS_OVER (NUM_MAX_INFLIGHT_OPS + 1)
67 /* data size bigger than the queue capacity */
68 #define IM_BUF_DATA_TEST_SIZE_OVER (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_OVER)
69 /* number of mid-size mbufs */
70 #define IM_BUF_NUM_MBUFS_MID ((NUM_MAX_INFLIGHT_OPS / 3) + 1)
71 /* capacity of mid-size mbufs */
72 #define IM_BUF_DATA_TEST_SIZE_MID (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_MID)
76 huffman_type_strings[] = {
77 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
78 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
79 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
90 LB_BOTH = 0, /* both input and output are linear*/
91 SGL_BOTH, /* both input and output are chained */
92 SGL_TO_LB, /* input buffer is chained */
93 LB_TO_SGL /* output buffer is chained */
106 enum operation_type {
107 OPERATION_COMPRESSION,
108 OPERATION_DECOMPRESSION
111 struct priv_op_data {
115 struct comp_testsuite_params {
116 struct rte_mempool *large_mbuf_pool;
117 struct rte_mempool *small_mbuf_pool;
118 struct rte_mempool *big_mbuf_pool;
119 struct rte_mempool *op_pool;
120 struct rte_comp_xform *def_comp_xform;
121 struct rte_comp_xform *def_decomp_xform;
124 struct interim_data_params {
125 const char * const *test_bufs;
126 unsigned int num_bufs;
128 struct rte_comp_xform **compress_xforms;
129 struct rte_comp_xform **decompress_xforms;
130 unsigned int num_xforms;
133 struct test_data_params {
134 enum rte_comp_op_type compress_state;
135 enum rte_comp_op_type decompress_state;
136 enum varied_buff buff_type;
137 enum zlib_direction zlib_dir;
138 unsigned int out_of_space;
139 unsigned int big_data;
140 /* stateful decompression specific parameters */
141 unsigned int decompress_output_block_size;
142 unsigned int decompress_steps_max;
143 /* external mbufs specific parameters */
144 unsigned int use_external_mbufs;
145 unsigned int inbuf_data_size;
146 const struct rte_memzone *inbuf_memzone;
147 const struct rte_memzone *compbuf_memzone;
148 const struct rte_memzone *uncompbuf_memzone;
149 /* overflow test activation */
150 enum overflow_test overflow;
151 enum ratio_switch ratio;
154 struct test_private_arrays {
155 struct rte_mbuf **uncomp_bufs;
156 struct rte_mbuf **comp_bufs;
157 struct rte_comp_op **ops;
158 struct rte_comp_op **ops_processed;
160 uint64_t *compress_checksum;
161 uint32_t *compressed_data_size;
163 char **all_decomp_data;
164 unsigned int *decomp_produced_data_size;
165 uint16_t num_priv_xforms;
168 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite-level teardown: report any test-leaked objects, then release every
 * resource created by testsuite_setup() (three mbuf pools, the op pool and
 * the two default xforms).
 * NOTE(review): this view of the file is truncated — the return-type line,
 * braces and blank lines are missing; code lines kept byte-identical.
 */
172 testsuite_teardown(void)
174 struct comp_testsuite_params *ts_params = &testsuite_params;
/* Leak detection: a non-zero in-use count here means a test forgot to free. */
176 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
177 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
178 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
179 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
180 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
181 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
182 if (rte_mempool_in_use_count(ts_params->op_pool))
183 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() accept NULL, so unconditional frees are safe
   even if setup failed part-way. */
185 rte_mempool_free(ts_params->large_mbuf_pool);
186 rte_mempool_free(ts_params->small_mbuf_pool);
187 rte_mempool_free(ts_params->big_mbuf_pool);
188 rte_mempool_free(ts_params->op_pool);
189 rte_free(ts_params->def_comp_xform);
190 rte_free(ts_params->def_decomp_xform);
/*
 * Suite-level setup: requires at least one compressdev; creates the three
 * mbuf pools (large for linear bufs, small for SGL segments, big for
 * >64KB data tests), the comp-op pool, and the default compress/decompress
 * DEFLATE xforms used by most tests.
 * NOTE(review): truncated view — braces, some arguments and the error paths
 * between the visible lines are missing; code lines kept byte-identical.
 */
194 testsuite_setup(void)
196 struct comp_testsuite_params *ts_params = &testsuite_params;
197 uint32_t max_buf_size = 0;
200 if (rte_compressdev_count() == 0) {
201 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
205 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
206 rte_compressdev_name_get(0));
/* Size pools for the largest static test buffer (plus NUL terminator). */
208 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
209 max_buf_size = RTE_MAX(max_buf_size,
210 strlen(compress_test_bufs[i]) + 1);
213 * Buffers to be used in compression and decompression.
214 * Since decompressed data might be larger than
215 * compressed data (due to block header),
216 * buffers should be big enough for both cases.
218 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
219 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
222 max_buf_size + RTE_PKTMBUF_HEADROOM,
224 if (ts_params->large_mbuf_pool == NULL) {
225 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
229 /* Create mempool with smaller buffers for SGL testing */
230 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
231 NUM_LARGE_MBUFS * MAX_SEGS,
233 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
235 if (ts_params->small_mbuf_pool == NULL) {
236 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
240 /* Create mempool with big buffers for SGL testing */
241 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
244 MAX_MBUF_SEGMENT_SIZE,
246 if (ts_params->big_mbuf_pool == NULL) {
247 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* Op pool reserves room for struct priv_op_data (orig_idx) per op. */
251 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
252 0, sizeof(struct priv_op_data),
254 if (ts_params->op_pool == NULL) {
255 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
259 ts_params->def_comp_xform =
260 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
261 if (ts_params->def_comp_xform == NULL) {
263 "Default compress xform could not be created\n");
266 ts_params->def_decomp_xform =
267 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
268 if (ts_params->def_decomp_xform == NULL) {
270 "Default decompress xform could not be created\n");
274 /* Initializes default values for compress/decompress xforms */
275 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): the trailing comma below (instead of ';') makes this a comma
   expression with the next statement — harmless but confusing; worth fixing. */
276 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
277 ts_params->def_comp_xform->compress.deflate.huffman =
278 RTE_COMP_HUFFMAN_DEFAULT;
279 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
280 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
281 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
283 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same comma-for-semicolon pattern as above. */
284 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
285 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
286 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: undo any partial allocations before failing the suite. */
291 testsuite_teardown();
/*
 * Per-test setup: configure device 0 with one queue pair and
 * NUM_MAX_XFORMS private xforms, set up queue pair 0 with
 * NUM_MAX_INFLIGHT_OPS descriptors, then start the device.
 * NOTE(review): truncated view — braces/returns between visible lines are
 * missing; code lines kept byte-identical.
 */
297 generic_ut_setup(void)
299 /* Configure compressdev (one device, one queue pair) */
300 struct rte_compressdev_config config = {
301 .socket_id = rte_socket_id(),
303 .max_nb_priv_xforms = NUM_MAX_XFORMS,
307 if (rte_compressdev_configure(0, &config) < 0) {
308 RTE_LOG(ERR, USER1, "Device configuration failed\n");
312 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
313 rte_socket_id()) < 0) {
314 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
318 if (rte_compressdev_start(0) < 0) {
319 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop then close device 0 (close failure is only logged). */
327 generic_ut_teardown(void)
329 rte_compressdev_stop(0);
330 if (rte_compressdev_close(0) < 0)
331 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: device configuration / queue-pair setup must fail for
 * (a) zero queue pairs, (b) more queue pairs than the device reports as
 * its maximum, and (c) queue-pair setup before any queue pairs are
 * configured.
 * NOTE(review): truncated view — braces and the final return are missing;
 * code lines kept byte-identical.
 */
335 test_compressdev_invalid_configuration(void)
337 struct rte_compressdev_config invalid_config;
338 struct rte_compressdev_config valid_config = {
339 .socket_id = rte_socket_id(),
341 .max_nb_priv_xforms = NUM_MAX_XFORMS,
344 struct rte_compressdev_info dev_info;
346 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
348 /* Invalid configuration with 0 queue pairs */
349 memcpy(&invalid_config, &valid_config,
350 sizeof(struct rte_compressdev_config));
351 invalid_config.nb_queue_pairs = 0;
353 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
354 "Device configuration was successful "
355 "with no queue pairs (invalid)\n");
358 * Invalid configuration with too many queue pairs
359 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit", so the over-limit case is
   only exercised when the device advertises a real maximum. */
361 rte_compressdev_info_get(0, &dev_info);
362 if (dev_info.max_nb_queue_pairs != 0) {
363 memcpy(&invalid_config, &valid_config,
364 sizeof(struct rte_compressdev_config));
365 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
367 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
368 "Device configuration was successful "
369 "with too many queue pairs (invalid)\n");
372 /* Invalid queue pair setup, with no number of queue pairs set */
373 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
374 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
375 "Queue pair setup was successful "
376 "with no queue pairs set (invalid)\n");
/*
 * Compare two buffers by length and content; logs the reason on mismatch.
 * NOTE(review): truncated view — the return statements (mismatch / success)
 * are not visible here; code lines kept byte-identical.
 */
382 compare_buffers(const char *buffer1, uint32_t buffer1_len,
383 const char *buffer2, uint32_t buffer2_len)
385 if (buffer1_len != buffer2_len) {
386 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
390 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
391 RTE_LOG(ERR, USER1, "Buffers are different\n");
399 * Maps compressdev and Zlib flush flags
/*
 * Map an rte_comp flush flag to the corresponding zlib flush constant
 * (presumably Z_NO_FLUSH / Z_SYNC_FLUSH / Z_FULL_FLUSH / Z_FINISH — the
 * return lines are not visible in this truncated view).
 */
402 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
405 case RTE_COMP_FLUSH_NONE:
407 case RTE_COMP_FLUSH_SYNC:
409 case RTE_COMP_FLUSH_FULL:
411 case RTE_COMP_FLUSH_FINAL:
414 * There should be only the values above,
415 * so this should never happen
/*
 * Reference compression using zlib's deflate, so PMD output can be validated
 * (or PMD decompression fed with known-good input). Handles chained (SGL)
 * source/destination mbufs by linearizing into temporary flat buffers.
 * For ADLER32/CRC32 checksum modes the zlib/gzip header and trailer are
 * stripped from the destination mbuf so only the raw deflate stream and its
 * byte counts are reported in op->produced.
 * NOTE(review): truncated view — braces, else-branches, gotos and cleanup
 * labels are missing; code lines kept byte-identical.
 */
423 compress_zlib(struct rte_comp_op *op,
424 const struct rte_comp_xform *xform, int mem_level)
428 int strategy, window_bits, comp_level;
429 int ret = TEST_FAILED;
430 uint8_t *single_src_buf = NULL;
431 uint8_t *single_dst_buf = NULL;
433 /* initialize zlib stream */
434 stream.zalloc = Z_NULL;
435 stream.zfree = Z_NULL;
436 stream.opaque = Z_NULL;
/* Fixed-Huffman xform maps to a non-default strategy (line not visible);
   everything else uses zlib's default strategy. */
438 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
441 strategy = Z_DEFAULT_STRATEGY;
444 * Window bits is the base two logarithm of the window size (in bytes).
445 * When doing raw DEFLATE, this number will be negative.
447 window_bits = -(xform->compress.window_size);
/* Checksum mode selects the container: ADLER32 -> zlib wrapper (assignment
   line not visible), CRC32 -> gzip wrapper via windowBits 31. */
448 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
450 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
451 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
453 comp_level = xform->compress.level;
455 if (comp_level != RTE_COMP_LEVEL_NONE)
456 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
457 window_bits, mem_level, strategy);
459 ret = deflateInit(&stream, Z_NO_COMPRESSION);
462 printf("Zlib deflate could not be initialized\n");
466 /* Assuming stateless operation */
/* Chained source: linearize into a flat temp buffer for zlib. */
468 if (op->m_src->nb_segs > 1) {
469 single_src_buf = rte_malloc(NULL,
470 rte_pktmbuf_pkt_len(op->m_src), 0);
471 if (single_src_buf == NULL) {
472 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
476 if (rte_pktmbuf_read(op->m_src, op->src.offset,
477 rte_pktmbuf_pkt_len(op->m_src) -
479 single_src_buf) == NULL) {
481 "Buffer could not be read entirely\n");
485 stream.avail_in = op->src.length;
486 stream.next_in = single_src_buf;
489 stream.avail_in = op->src.length;
490 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: deflate into a flat temp buffer, scatter later. */
494 if (op->m_dst->nb_segs > 1) {
496 single_dst_buf = rte_malloc(NULL,
497 rte_pktmbuf_pkt_len(op->m_dst), 0);
498 if (single_dst_buf == NULL) {
500 "Buffer could not be allocated\n");
504 stream.avail_out = op->m_dst->pkt_len;
505 stream.next_out = single_dst_buf;
507 } else {/* linear output */
508 stream.avail_out = op->m_dst->data_len;
509 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
513 /* Stateless operation, all buffer will be compressed in one go */
514 zlib_flush = map_zlib_flush_flag(op->flush_flag);
515 ret = deflate(&stream, zlib_flush);
517 if (stream.avail_in != 0) {
518 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
522 if (ret != Z_STREAM_END)
525 /* Copy data to destination SGL */
526 if (op->m_dst->nb_segs > 1) {
527 uint32_t remaining_data = stream.total_out;
528 uint8_t *src_data = single_dst_buf;
529 struct rte_mbuf *dst_buf = op->m_dst;
531 while (remaining_data > 0) {
532 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
533 uint8_t *, op->dst.offset);
/* Last (partial) segment gets the remainder; full segments advance
   down the chain. */
535 if (remaining_data < dst_buf->data_len) {
536 memcpy(dst_data, src_data, remaining_data);
539 memcpy(dst_data, src_data, dst_buf->data_len);
540 remaining_data -= dst_buf->data_len;
541 src_data += dst_buf->data_len;
542 dst_buf = dst_buf->next;
547 op->consumed = stream.total_in;
/* Strip container header/trailer so m_dst holds raw deflate data only. */
548 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
549 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
550 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
551 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
553 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
554 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
555 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
556 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
559 op->produced = stream.total_out;
561 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* zlib reports the running adler/crc checksum in stream.adler. */
562 op->output_chksum = stream.adler;
564 deflateReset(&stream);
/* Cleanup path: rte_free(NULL) is a no-op, so both frees are unconditional. */
569 rte_free(single_src_buf);
570 rte_free(single_dst_buf);
/*
 * Reference decompression using zlib's inflate, mirror of compress_zlib():
 * linearizes chained mbufs into temp buffers, inflates in one stateless
 * call, then scatters the output back across the destination chain.
 * NOTE(review): truncated view — braces, else-branches and cleanup labels
 * are missing; code lines kept byte-identical.
 */
576 decompress_zlib(struct rte_comp_op *op,
577 const struct rte_comp_xform *xform)
582 int ret = TEST_FAILED;
583 uint8_t *single_src_buf = NULL;
584 uint8_t *single_dst_buf = NULL;
586 /* initialize zlib stream */
587 stream.zalloc = Z_NULL;
588 stream.zfree = Z_NULL;
589 stream.opaque = Z_NULL;
592 * Window bits is the base two logarithm of the window size (in bytes).
593 * When doing raw DEFLATE, this number will be negative.
595 window_bits = -(xform->decompress.window_size);
596 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate path —
   runtime string left untouched in this documentation-only pass. */
599 printf("Zlib deflate could not be initialized\n");
603 /* Assuming stateless operation */
/* Chained source: linearize input AND allocate a flat output scratch
   buffer (scattered back to the dst chain after inflate). */
605 if (op->m_src->nb_segs > 1) {
606 single_src_buf = rte_malloc(NULL,
607 rte_pktmbuf_pkt_len(op->m_src), 0);
608 if (single_src_buf == NULL) {
609 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
612 single_dst_buf = rte_malloc(NULL,
613 rte_pktmbuf_pkt_len(op->m_dst), 0);
614 if (single_dst_buf == NULL) {
615 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
618 if (rte_pktmbuf_read(op->m_src, 0,
619 rte_pktmbuf_pkt_len(op->m_src),
620 single_src_buf) == NULL) {
622 "Buffer could not be read entirely\n");
626 stream.avail_in = op->src.length;
627 stream.next_in = single_src_buf;
628 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
629 stream.next_out = single_dst_buf;
632 stream.avail_in = op->src.length;
633 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
634 stream.avail_out = op->m_dst->data_len;
635 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
638 /* Stateless operation, all buffer will be compressed in one go */
639 zlib_flush = map_zlib_flush_flag(op->flush_flag);
640 ret = inflate(&stream, zlib_flush);
642 if (stream.avail_in != 0) {
643 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
647 if (ret != Z_STREAM_END)
/* Scatter flat output across the destination chain. NOTE(review): gate is
   on m_src->nb_segs (the condition under which single_dst_buf was
   allocated), not m_dst — presumably relies on SGL tests chaining both
   sides the same way; verify against the buffer-setup callers. */
650 if (op->m_src->nb_segs > 1) {
651 uint32_t remaining_data = stream.total_out;
652 uint8_t *src_data = single_dst_buf;
653 struct rte_mbuf *dst_buf = op->m_dst;
655 while (remaining_data > 0) {
656 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
659 if (remaining_data < dst_buf->data_len) {
660 memcpy(dst_data, src_data, remaining_data);
663 memcpy(dst_data, src_data, dst_buf->data_len);
664 remaining_data -= dst_buf->data_len;
665 src_data += dst_buf->data_len;
666 dst_buf = dst_buf->next;
671 op->consumed = stream.total_in;
672 op->produced = stream.total_out;
673 op->status = RTE_COMP_OP_STATUS_SUCCESS;
675 inflateReset(&stream);
/*
 * Build a scatter-gather mbuf chain of total_data_size bytes, seg_size per
 * segment, optionally copying test_buf data in (test_buf == NULL leaves the
 * segments appended but uncopied, i.e. an output chain). head_buf supplies
 * the first segment; extra segments come from small_mbuf_pool, except the
 * last one which may need large_mbuf_pool when segment count was capped by
 * limit_segs_in_sgl and the remainder exceeds seg_size.
 * NOTE(review): truncated view — braces, returns and some parameters/locals
 * are missing; code lines kept byte-identical.
 */
685 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
686 uint32_t total_data_size,
687 struct rte_mempool *small_mbuf_pool,
688 struct rte_mempool *large_mbuf_pool,
689 uint8_t limit_segs_in_sgl,
692 uint32_t remaining_data = total_data_size;
693 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
694 struct rte_mempool *pool;
695 struct rte_mbuf *next_seg;
698 const char *data_ptr = test_buf;
/* Cap segment count when requested; the "- 1" accounts for head_buf
   already providing one segment. */
702 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
703 num_remaining_segs = limit_segs_in_sgl - 1;
706 * Allocate data in the first segment (header) and
707 * copy data if test buffer is provided
709 if (remaining_data < seg_size)
710 data_size = remaining_data;
712 data_size = seg_size;
714 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
715 if (buf_ptr == NULL) {
717 "Not enough space in the 1st buffer\n");
721 if (data_ptr != NULL) {
722 /* Copy characters without NULL terminator */
723 memcpy(buf_ptr, data_ptr, data_size);
724 data_ptr += data_size;
726 remaining_data -= data_size;
727 num_remaining_segs--;
730 * Allocate the rest of the segments,
731 * copy the rest of the data and chain the segments.
733 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment absorbs the whole remainder; pick the large pool if the
   remainder no longer fits a small segment (capped-segment case). */
735 if (i == (num_remaining_segs - 1)) {
737 if (remaining_data > seg_size)
738 pool = large_mbuf_pool;
740 pool = small_mbuf_pool;
741 data_size = remaining_data;
743 data_size = seg_size;
744 pool = small_mbuf_pool;
747 next_seg = rte_pktmbuf_alloc(pool);
748 if (next_seg == NULL) {
750 "New segment could not be allocated "
751 "from the mempool\n");
754 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
755 if (buf_ptr == NULL) {
757 "Not enough space in the buffer\n");
/* Free the segment we could not use before bailing out. */
758 rte_pktmbuf_free(next_seg);
761 if (data_ptr != NULL) {
762 /* Copy characters without NULL terminator */
763 memcpy(buf_ptr, data_ptr, data_size);
764 data_ptr += data_size;
766 remaining_data -= data_size;
768 ret = rte_pktmbuf_chain(head_buf, next_seg);
770 rte_pktmbuf_free(next_seg);
/* NOTE(review): runtime string reads "could not chained" — grammar bug,
   left untouched in this documentation-only pass. */
772 "Segment could not chained\n");
/*
 * Free callback for externally-attached mbuf buffers (rte_pktmbuf_attach_extbuf).
 * Both arguments are marked unused; body not visible in this truncated view —
 * presumably a no-op because the backing memzones are owned by the tests.
 */
781 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
/*
 * Enqueue num_bufs ops on qp 0 of device 0, then dequeue everything that was
 * actually enqueued into ops_processed, retrying with a DEQUEUE_WAIT_TIME
 * sleep between attempts up to MAX_DEQD_RETRIES times.
 * NOTE(review): truncated view — error-flag handling, retry-counter updates
 * and the return are missing; code lines kept byte-identical.
 */
786 test_run_enqueue_dequeue(struct rte_comp_op **ops,
787 struct rte_comp_op **ops_processed,
788 unsigned int num_bufs)
790 uint16_t num_enqd, num_deqd, num_total_deqd;
791 unsigned int deqd_retries = 0;
794 /* Enqueue and dequeue all operations */
795 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
796 if (num_enqd < num_bufs) {
798 "Some operations could not be enqueued\n");
/* Even after a partial enqueue, drain what WAS enqueued so ops are not
   left in flight. */
802 /* dequeue ops even on error (same number of ops as was enqueued) */
805 while (num_total_deqd < num_enqd) {
807 * If retrying a dequeue call, wait for 10 ms to allow
808 * enough time to the driver to process the operations
810 if (deqd_retries != 0) {
812 * Avoid infinite loop if not all the
813 * operations get out of the device
815 if (deqd_retries == MAX_DEQD_RETRIES) {
817 "Not all operations could be dequeued\n");
821 usleep(DEQUEUE_WAIT_TIME);
823 num_deqd = rte_compressdev_dequeue_burst(0, 0,
824 &ops_processed[num_total_deqd], num_bufs);
825 num_total_deqd += num_deqd;
834 * Arrays initialization. Input buffers preparation for compression.
836 * API that initializes all the private arrays to NULL
837 * and allocates input buffers to perform compression operations.
840 * Interim data containing session/transformation objects.
842 * The test parameters set by users (command line parameters).
843 * @param test_priv_data
844 * A container used for aggregation all the private test arrays.
/*
 * Prepare input (source) buffers for compression: zero all private arrays,
 * pick the mbuf pool per buffer type (big / small-SGL / large-linear),
 * bulk-allocate the source mbufs, then populate them either as external
 * (memzone-backed) buffers, SGL chains, or linear copies of the test strings.
 * NOTE(review): truncated view — braces, returns and some conditions are
 * missing; code lines kept byte-identical.
 */
850 test_setup_com_bufs(const struct interim_data_params *int_data,
851 const struct test_data_params *test_data,
852 const struct test_private_arrays *test_priv_data)
854 /* local variables: */
859 char **all_decomp_data = test_priv_data->all_decomp_data;
861 struct comp_testsuite_params *ts_params = &testsuite_params;
864 const char * const *test_bufs = int_data->test_bufs;
865 unsigned int num_bufs = int_data->num_bufs;
867 /* from test_data: */
868 unsigned int buff_type = test_data->buff_type;
869 unsigned int big_data = test_data->big_data;
871 /* from test_priv_data: */
872 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
873 struct rte_mempool *buf_pool;
/* static: the ext-buf shared info must outlive this call, since attached
   mbufs keep referencing it until they are freed. */
875 static struct rte_mbuf_ext_shared_info inbuf_info;
877 size_t array_size = sizeof(void *) * num_bufs;
879 /* Initialize all arrays to NULL */
880 memset(test_priv_data->uncomp_bufs, 0, array_size);
881 memset(test_priv_data->comp_bufs, 0, array_size);
882 memset(test_priv_data->ops, 0, array_size);
883 memset(test_priv_data->ops_processed, 0, array_size);
884 memset(test_priv_data->priv_xforms, 0, array_size);
885 memset(test_priv_data->compressed_data_size,
886 0, sizeof(uint32_t) * num_bufs);
/* Stateful decompression accumulates output across steps into one buffer
   sized for the first test string (+ NUL). */
888 if (test_data->decompress_state == RTE_COMP_OP_STATEFUL) {
889 data_size = strlen(test_bufs[0]) + 1;
890 *all_decomp_data = rte_malloc(NULL, data_size,
891 RTE_CACHE_LINE_SIZE);
/* Pool choice (big_data condition line not visible above this branch). */
895 buf_pool = ts_params->big_mbuf_pool;
896 else if (buff_type == SGL_BOTH)
897 buf_pool = ts_params->small_mbuf_pool;
899 buf_pool = ts_params->large_mbuf_pool;
901 /* for compression uncomp_bufs is used as a source buffer */
902 /* allocation from buf_pool (mempool type) */
903 ret = rte_pktmbuf_alloc_bulk(buf_pool,
904 uncomp_bufs, num_bufs);
907 "Source mbufs could not be allocated "
908 "from the mempool\n");
/* External-buffer mode: attach the shared input memzone to every mbuf. */
912 if (test_data->use_external_mbufs) {
913 inbuf_info.free_cb = extbuf_free_callback;
914 inbuf_info.fcb_opaque = NULL;
915 rte_mbuf_ext_refcnt_set(&inbuf_info, 1);
916 for (i = 0; i < num_bufs; i++) {
917 rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
918 test_data->inbuf_memzone->addr,
919 test_data->inbuf_memzone->iova,
920 test_data->inbuf_data_size,
922 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i],
923 test_data->inbuf_data_size);
924 if (buf_ptr == NULL) {
926 "Append extra bytes to the source mbuf failed\n");
/* SGL input: chain segments; pools/limits differ for big-data tests. */
930 } else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
931 for (i = 0; i < num_bufs; i++) {
932 data_size = strlen(test_bufs[i]) + 1;
933 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
935 big_data ? buf_pool : ts_params->small_mbuf_pool,
936 big_data ? buf_pool : ts_params->large_mbuf_pool,
937 big_data ? 0 : MAX_SEGS,
938 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
/* Linear input: append and copy the NUL-terminated test string. */
942 for (i = 0; i < num_bufs; i++) {
943 data_size = strlen(test_bufs[i]) + 1;
945 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
946 if (buf_ptr == NULL) {
948 "Append extra bytes to the source mbuf failed\n");
951 strlcpy(buf_ptr, test_bufs[i], data_size);
959 * Data size calculation (for both compression and decompression).
961 * Calculate size of anticipated output buffer required for both
962 * compression and decompression operations based on input int_data.
965 * Operation type: compress or decompress
966 * @param out_of_space_and_zlib
967 * Boolean value to switch into "out of space" buffer if set.
968 * To test "out-of-space" data size, zlib_decompress must be set as well.
969 * @param test_priv_data
970 * A container used for aggregation all the private test arrays.
972 * Interim data containing session/transformation objects.
974 * The test parameters set by users (command line parameters).
976 * current buffer index
/*
 * Compute the anticipated output-buffer size for one buffer index i.
 * Out-of-space tests force a deliberately tiny buffer (OUT_OF_SPACE_BUF).
 * For compression the size is the input length scaled by a ratio that
 * depends on the ratio/overflow switches; for decompression it is the
 * original (pre-compression) string length looked up via the op's stored
 * orig_idx.
 * NOTE(review): truncated view — braces, else-branches and the return are
 * missing; code lines kept byte-identical.
 */
980 static inline uint32_t
981 test_mbufs_calculate_data_size(
982 enum operation_type op_type,
983 unsigned int out_of_space_and_zlib,
984 const struct test_private_arrays *test_priv_data,
985 const struct interim_data_params *int_data,
986 const struct test_data_params *test_data,
989 /* local variables: */
991 struct priv_op_data *priv_data;
993 enum ratio_switch ratio = test_data->ratio;
995 uint8_t not_zlib_compr; /* true if zlib isn't current compression dev */
996 enum overflow_test overflow = test_data->overflow;
998 /* from test_priv_data: */
999 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1001 /* from int_data: */
1002 const char * const *test_bufs = int_data->test_bufs;
1004 if (out_of_space_and_zlib)
1005 data_size = OUT_OF_SPACE_BUF;
1007 if (op_type == OPERATION_COMPRESSION) {
1008 not_zlib_compr = (test_data->zlib_dir == ZLIB_DECOMPRESS
1009 || test_data->zlib_dir == ZLIB_NONE);
/* Ratio selection: normal path picks 1.3x or 1.0x; the overflow test
   deliberately undersizes the buffer (0.2x) for non-zlib compression. */
1011 ratio_val = (ratio == RATIO_ENABLED) ?
1012 COMPRESS_BUF_SIZE_RATIO :
1013 COMPRESS_BUF_SIZE_RATIO_DISABLED;
1015 ratio_val = (not_zlib_compr &&
1016 (overflow == OVERFLOW_ENABLED)) ?
1017 COMPRESS_BUF_SIZE_RATIO_OVERFLOW :
1020 data_size = strlen(test_bufs[i]) * ratio_val;
/* Decompression: priv data lives right after the op struct in the op
   mempool element; orig_idx maps back to the original test string. */
1022 priv_data = (struct priv_op_data *)
1023 (ops_processed[i] + 1);
1024 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
1033 * Memory buffers preparation (for both compression and decompression).
1035 * Function allocates output buffers to perform compression
1036 * or decompression operations depending on value of op_type.
1039 * Operation type: compress or decompress
1040 * @param out_of_space_and_zlib
1041 * Boolean value to switch into "out of space" buffer if set.
1042 * To test "out-of-space" data size, zlib_decompress must be set as well.
1043 * @param test_priv_data
1044 * A container used for aggregation all the private test arrays.
1046 * Interim data containing session/transformation objects.
1048 * The test parameters set by users (command line parameters).
1049 * @param current_extbuf_info,
1050 * The structure containing all the information related to external mbufs
/*
 * Allocate and size the output (destination) buffers for a compression or
 * decompression run: pick the pool by buffer type, bulk-allocate, then per
 * buffer either attach the external memzone, build an SGL chain, or append
 * linear space sized by test_mbufs_calculate_data_size().
 * NOTE(review): truncated view — braces, some conditions and returns are
 * missing; code lines kept byte-identical.
 */
1056 test_setup_output_bufs(
1057 enum operation_type op_type,
1058 unsigned int out_of_space_and_zlib,
1059 const struct test_private_arrays *test_priv_data,
1060 const struct interim_data_params *int_data,
1061 const struct test_data_params *test_data,
1062 struct rte_mbuf_ext_shared_info *current_extbuf_info)
1064 /* local variables: */
1070 /* from test_priv_data: */
1071 struct rte_mbuf **current_bufs;
1073 /* from int_data: */
1074 unsigned int num_bufs = int_data->num_bufs;
1076 /* from test_data: */
1077 unsigned int buff_type = test_data->buff_type;
1078 unsigned int big_data = test_data->big_data;
1079 const struct rte_memzone *current_memzone;
1081 struct comp_testsuite_params *ts_params = &testsuite_params;
1082 struct rte_mempool *buf_pool;
/* Pool choice (big_data condition line not visible above this branch). */
1085 buf_pool = ts_params->big_mbuf_pool;
1086 else if (buff_type == SGL_BOTH)
1087 buf_pool = ts_params->small_mbuf_pool;
1089 buf_pool = ts_params->large_mbuf_pool;
/* Compression writes into comp_bufs; decompression reuses uncomp_bufs
   as its destination array. */
1091 if (op_type == OPERATION_COMPRESSION)
1092 current_bufs = test_priv_data->comp_bufs;
1094 current_bufs = test_priv_data->uncomp_bufs;
1096 /* the mbufs allocation*/
1097 ret = rte_pktmbuf_alloc_bulk(buf_pool, current_bufs, num_bufs);
1100 "Destination mbufs could not be allocated "
1101 "from the mempool\n");
1105 if (test_data->use_external_mbufs) {
1106 current_extbuf_info->free_cb = extbuf_free_callback;
1107 current_extbuf_info->fcb_opaque = NULL;
1108 rte_mbuf_ext_refcnt_set(current_extbuf_info, 1);
1109 if (op_type == OPERATION_COMPRESSION)
1110 current_memzone = test_data->compbuf_memzone;
1112 current_memzone = test_data->uncompbuf_memzone;
1114 for (i = 0; i < num_bufs; i++) {
1115 rte_pktmbuf_attach_extbuf(current_bufs[i],
1116 current_memzone->addr,
1117 current_memzone->iova,
1118 current_memzone->len,
1119 current_extbuf_info);
1120 rte_pktmbuf_append(current_bufs[i],
1121 current_memzone->len);
1124 for (i = 0; i < num_bufs; i++) {
1126 enum rte_comp_huffman comp_huffman =
1127 ts_params->def_comp_xform->compress.deflate.huffman;
1129 /* data size calculation */
1130 data_size = test_mbufs_calculate_data_size(
1132 out_of_space_and_zlib,
/* Non-dynamic Huffman can expand data, so decompression destinations get
   extra headroom via the compress ratio. */
1138 if (comp_huffman != RTE_COMP_HUFFMAN_DYNAMIC) {
1139 if (op_type == OPERATION_DECOMPRESSION)
1140 data_size *= COMPRESS_BUF_SIZE_RATIO;
1143 /* data allocation */
1144 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
/* NULL test_buf: build the chain without copying any payload. */
1145 ret = prepare_sgl_bufs(NULL, current_bufs[i],
1147 big_data ? buf_pool :
1148 ts_params->small_mbuf_pool,
1149 big_data ? buf_pool :
1150 ts_params->large_mbuf_pool,
1151 big_data ? 0 : MAX_SEGS,
1152 big_data ? MAX_DATA_MBUF_SIZE :
1157 buf_ptr = rte_pktmbuf_append(current_bufs[i],
1159 if (buf_ptr == NULL) {
1161 "Append extra bytes to the destination mbuf failed\n");
1172 * The main compression function.
1174 * Function performs compression operation.
1175 * Operation(s) configuration, depending on CLI parameters.
1176 * Operation(s) processing.
1179 * Interim data containing session/transformation objects.
1181 * The test parameters set by users (command line parameters).
1182 * @param test_priv_data
1183 * A container used for aggregation all the private test arrays.
/*
 * Run the compression half of a test: build one op per buffer (stateless
 * only), compress either via the zlib reference path or via compressdev
 * with per-op private xforms (shared xforms reused when the PMD advertises
 * RTE_COMP_FF_SHAREABLE_PRIV_XFORM), enqueue/dequeue, then handle
 * OUT_OF_SPACE_RECOVERABLE by advancing src/dst offsets for a retry.
 * On the error path all ops and private xforms are freed.
 * NOTE(review): truncated view — braces, gotos, loop headers and returns
 * are missing; code lines kept byte-identical.
 */
1189 test_deflate_comp_run(const struct interim_data_params *int_data,
1190 const struct test_data_params *test_data,
1191 const struct test_private_arrays *test_priv_data)
1193 /* local variables: */
1194 struct priv_op_data *priv_data;
1196 uint16_t num_priv_xforms = 0;
1201 struct comp_testsuite_params *ts_params = &testsuite_params;
1203 /* from test_data: */
1204 enum rte_comp_op_type operation_type = test_data->compress_state;
1205 unsigned int zlib_compress =
1206 (test_data->zlib_dir == ZLIB_ALL ||
1207 test_data->zlib_dir == ZLIB_COMPRESS);
1209 /* from int_data: */
1210 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1211 unsigned int num_xforms = int_data->num_xforms;
1212 unsigned int num_bufs = int_data->num_bufs;
1214 /* from test_priv_data: */
1215 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
1216 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1217 struct rte_comp_op **ops = test_priv_data->ops;
1218 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1219 void **priv_xforms = test_priv_data->priv_xforms;
1221 const struct rte_compressdev_capabilities *capa =
1222 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1224 /* Build the compression operations */
1225 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1228 "Compress operations could not be allocated "
1229 "from the mempool\n");
1234 for (i = 0; i < num_bufs; i++) {
1235 ops[i]->m_src = uncomp_bufs[i];
1236 ops[i]->m_dst = comp_bufs[i];
1237 ops[i]->src.offset = 0;
1238 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
1239 ops[i]->dst.offset = 0;
1241 RTE_LOG(DEBUG, USER1,
1242 "Uncompressed buffer length = %u compressed buffer length = %u",
1243 rte_pktmbuf_pkt_len(uncomp_bufs[i]),
1244 rte_pktmbuf_pkt_len(comp_bufs[i]));
1246 if (operation_type == RTE_COMP_OP_STATELESS) {
1247 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1250 "Compression: stateful operations are not "
1251 "supported in these tests yet\n");
1255 ops[i]->input_chksum = 0;
1257 * Store original operation index in private data,
1258 * since ordering does not have to be maintained,
1259 * when dequeuing from compressdev, so a comparison
1260 * at the end of the test can be done.
/* Private area lives immediately after the op struct in the mempool elt. */
1262 priv_data = (struct priv_op_data *) (ops[i] + 1);
1263 priv_data->orig_idx = i;
1266 /* Compress data (either with Zlib API or compressdev API */
1267 if (zlib_compress) {
1268 for (i = 0; i < num_bufs; i++) {
/* xforms are assigned round-robin across buffers. */
1269 const struct rte_comp_xform *compress_xform =
1270 compress_xforms[i % num_xforms];
1271 ret = compress_zlib(ops[i], compress_xform,
/* zlib path processes in order, so ops_processed mirrors ops 1:1. */
1278 ops_processed[i] = ops[i];
1281 /* Create compress private xform data */
1282 for (i = 0; i < num_xforms; i++) {
1283 ret = rte_compressdev_private_xform_create(0,
1284 (const struct rte_comp_xform *)
1289 "Compression private xform "
1290 "could not be created\n");
1296 if (capa->comp_feature_flags &
1297 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1298 /* Attach shareable private xform data to ops */
1299 for (i = 0; i < num_bufs; i++)
1300 ops[i]->private_xform =
1301 priv_xforms[i % num_xforms];
1303 /* Create rest of the private xforms for the other ops */
1304 for (i = num_xforms; i < num_bufs; i++) {
1305 ret = rte_compressdev_private_xform_create(0,
1306 compress_xforms[i % num_xforms],
1310 "Compression private xform "
1311 "could not be created\n");
1317 /* Attach non shareable private xform data to ops */
1318 for (i = 0; i < num_bufs; i++)
1319 ops[i]->private_xform = priv_xforms[i];
1323 ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs);
1326 "Compression: enqueue/dequeue operation failed\n");
/* Accumulate produced bytes; recoverable out-of-space ops are rewound
   past the consumed/produced portions and marked for reprocessing. */
1331 for (i = 0; i < num_bufs; i++) {
1332 test_priv_data->compressed_data_size[i] +=
1333 ops_processed[i]->produced;
1335 if (ops_processed[i]->status ==
1336 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
1339 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1340 ops[i]->src.offset +=
1341 ops_processed[i]->consumed;
1342 ops[i]->src.length -=
1343 ops_processed[i]->consumed;
1344 ops[i]->dst.offset +=
1345 ops_processed[i]->produced;
1347 buf_ptr = rte_pktmbuf_append(
1349 ops_processed[i]->produced);
1351 if (buf_ptr == NULL) {
1353 "Data recovery: append extra bytes to the current mbuf failed\n");
1363 /* Free resources */
1365 for (i = 0; i < num_bufs; i++) {
1366 rte_comp_op_free(ops[i]);
1368 ops_processed[i] = NULL;
1371 /* Free compress private xforms */
1372 for (i = 0; i < num_priv_xforms; i++) {
1373 if (priv_xforms[i] != NULL) {
1374 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1375 priv_xforms[i] = NULL;
1383 * Prints out the test report. Memory freeing.
1385 * Called after successful compression.
1386 * Operation(s) status validation and decompression buffers freeing.
1388 * -1 returned if the function fails.
1391 * Interim data containing session/transformation objects.
1393 * The test parameters set by users (command line parameters).
1394 * @param test_priv_data
1395 * A container used for aggregating all the private test arrays.
1397 * - 2: Some operation is not supported
1398 * - 1: Decompression should be skipped
1403 test_deflate_comp_finalize(const struct interim_data_params *int_data,
1404 const struct test_data_params *test_data,
1405 const struct test_private_arrays *test_priv_data)
1407 /* local variables: */
1409 struct priv_op_data *priv_data;
1411 /* from int_data: */
1412 unsigned int num_xforms = int_data->num_xforms;
1413 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1414 unsigned int num_bufs = int_data->num_bufs;
1416 /* from test_priv_data: */
1417 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1418 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1419 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1420 struct rte_comp_op **ops = test_priv_data->ops;
1422 /* from test_data: */
1423 unsigned int out_of_space = test_data->out_of_space;
1424 unsigned int zlib_compress =
1425 (test_data->zlib_dir == ZLIB_ALL ||
1426 test_data->zlib_dir == ZLIB_COMPRESS);
1427 unsigned int zlib_decompress =
1428 (test_data->zlib_dir == ZLIB_ALL ||
1429 test_data->zlib_dir == ZLIB_DECOMPRESS);
/*
 * Debug report per buffer: which engine compressed it (if zlib is only
 * used for decompression, the PMD did the compression), consumed vs.
 * produced byte counts, compression level and huffman encoding. Also
 * records the per-buffer output checksum for the later decompression
 * comparison when a checksum algorithm is configured on the xform.
 */
1431 for (i = 0; i < num_bufs; i++) {
1432 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
/* orig_idx maps the (possibly reordered) dequeued op back to its xform */
1433 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1434 const struct rte_comp_compress_xform *compress_xform =
1435 &compress_xforms[xform_idx]->compress;
1436 enum rte_comp_huffman huffman_type =
1437 compress_xform->deflate.huffman;
1438 char engine[] = "zlib (directly, not PMD)";
1439 if (zlib_decompress)
1440 strlcpy(engine, "PMD", sizeof(engine));
1442 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
1443 " %u bytes (level = %d, huffman = %s)\n",
1445 ops_processed[i]->consumed, ops_processed[i]->produced,
1446 compress_xform->level,
1447 huffman_type_strings[huffman_type]);
/* ratio as percent; guard against division by zero when nothing consumed */
1448 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
1449 ops_processed[i]->consumed == 0 ? 0 :
1450 (float)ops_processed[i]->produced /
1451 ops_processed[i]->consumed * 100);
1452 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
1453 compress_checksum[i] = ops_processed[i]->output_chksum;
1458 * Check operation status and free source mbufs (destination mbuf and
1459 * compress operation information is needed for the decompression stage)
1461 for (i = 0; i < num_bufs; i++) {
/* out-of-space tests EXPECT the PMD to report OUT_OF_SPACE_TERMINATED */
1462 if (out_of_space && !zlib_compress) {
1463 if (ops_processed[i]->status !=
1464 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1466 "Operation without expected out of "
1467 "space status error\n");
1473 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
/* overflow tests tolerate OUT_OF_SPACE_TERMINATED as "unsupported" */
1474 if (test_data->overflow == OVERFLOW_ENABLED) {
1475 if (ops_processed[i]->status ==
1476 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1477 RTE_LOG(INFO, USER1,
1478 "Out-of-space-recoverable functionality"
1479 " is not supported on this device\n");
1485 "Comp: Some operations were not successful\n");
/* source mbufs are no longer needed once compression succeeded */
1488 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1489 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1490 uncomp_bufs[priv_data->orig_idx] = NULL;
1493 if (out_of_space && !zlib_compress)
1500 * The main decompression function.
1502 * Function performs decompression operation.
1503 * Operation(s) configuration, depending on CLI parameters.
1504 * Operation(s) processing.
1507 * Interim data containing session/transformation objects.
1509 * The test parameters set by users (command line parameters).
1510 * @param test_priv_data
1511 * A container used for aggregating all the private test arrays.
1517 test_deflate_decomp_run(const struct interim_data_params *int_data,
1518 const struct test_data_params *test_data,
1519 struct test_private_arrays *test_priv_data)
1522 /* local variables: */
1523 struct priv_op_data *priv_data;
1525 uint16_t num_priv_xforms = 0;
1529 struct comp_testsuite_params *ts_params = &testsuite_params;
1531 /* from test_data: */
1532 enum rte_comp_op_type operation_type = test_data->decompress_state;
1533 unsigned int zlib_decompress =
1534 (test_data->zlib_dir == ZLIB_ALL ||
1535 test_data->zlib_dir == ZLIB_DECOMPRESS);
1537 /* from int_data: */
1538 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
1539 unsigned int num_xforms = int_data->num_xforms;
1540 unsigned int num_bufs = int_data->num_bufs;
1542 /* from test_priv_data: */
1543 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1544 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
1545 struct rte_comp_op **ops = test_priv_data->ops;
1546 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1547 void **priv_xforms = test_priv_data->priv_xforms;
1548 uint32_t *compressed_data_size = test_priv_data->compressed_data_size;
1549 void **stream = test_priv_data->stream;
1551 const struct rte_compressdev_capabilities *capa =
1552 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
/* allocate one decompress op per buffer from the shared op mempool */
1554 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1557 "Decompress operations could not be allocated "
1558 "from the mempool\n");
1563 /* Source buffer is the compressed data from the previous operations */
1564 for (i = 0; i < num_bufs; i++) {
1565 ops[i]->m_src = comp_bufs[i];
1566 ops[i]->m_dst = uncomp_bufs[i];
1567 ops[i]->src.offset = 0;
1569 * Set the length of the compressed data to the
1570 * number of bytes that were produced in the previous stage
/*
 * compressed_data_size[i] is non-zero when the compression stage was
 * split across recoverable out-of-space retries; otherwise use the
 * single-shot 'produced' count from the compress op.
 */
1573 if (compressed_data_size[i])
1574 ops[i]->src.length = compressed_data_size[i];
1576 ops[i]->src.length = ops_processed[i]->produced;
1578 ops[i]->dst.offset = 0;
/* stateless ops flush FINAL; stateful PMD ops flush SYNC per step */
1580 if (operation_type == RTE_COMP_OP_STATELESS) {
1581 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1582 ops[i]->op_type = RTE_COMP_OP_STATELESS;
1583 } else if (!zlib_decompress) {
1584 ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
1585 ops[i]->op_type = RTE_COMP_OP_STATEFUL;
1588 "Decompression: stateful operations are"
1589 " not supported in these tests yet\n");
1593 ops[i]->input_chksum = 0;
1595 * Copy private data from previous operations,
1596 * to keep the pointer to the original buffer
1598 memcpy(ops[i] + 1, ops_processed[i] + 1,
1599 sizeof(struct priv_op_data));
1603 * Free the previous compress operations,
1604 * as they are not needed anymore
1606 rte_comp_op_bulk_free(ops_processed, num_bufs);
1608 /* Decompress data (either with Zlib API or compressdev API */
1609 if (zlib_decompress) {
1610 for (i = 0; i < num_bufs; i++) {
1611 priv_data = (struct priv_op_data *)(ops[i] + 1);
1612 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1613 const struct rte_comp_xform *decompress_xform =
1614 decompress_xforms[xform_idx];
1616 ret = decompress_zlib(ops[i], decompress_xform);
1622 ops_processed[i] = ops[i];
1625 if (operation_type == RTE_COMP_OP_STATELESS) {
1626 /* Create decompress private xform data */
1627 for (i = 0; i < num_xforms; i++) {
1628 ret = rte_compressdev_private_xform_create(0,
1629 (const struct rte_comp_xform *)
1630 decompress_xforms[i],
1634 "Decompression private xform "
1635 "could not be created\n");
/*
 * If the PMD supports shareable private xforms, one xform can serve
 * many ops; otherwise a private xform must be created per op.
 */
1642 if (capa->comp_feature_flags &
1643 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1644 /* Attach shareable private xform data to ops */
1645 for (i = 0; i < num_bufs; i++) {
1646 priv_data = (struct priv_op_data *)
1648 uint16_t xform_idx =
1649 priv_data->orig_idx % num_xforms;
1650 ops[i]->private_xform =
1651 priv_xforms[xform_idx];
1654 /* Create rest of the private xforms */
1655 /* for the other ops */
1656 for (i = num_xforms; i < num_bufs; i++) {
1658 rte_compressdev_private_xform_create(0,
1659 decompress_xforms[i % num_xforms],
1663 "Decompression private xform"
1664 " could not be created\n");
1671 /* Attach non shareable private xform data */
1673 for (i = 0; i < num_bufs; i++) {
1674 priv_data = (struct priv_op_data *)
1676 uint16_t xform_idx =
1677 priv_data->orig_idx;
1678 ops[i]->private_xform =
1679 priv_xforms[xform_idx];
1683 /* Create a stream object for stateful decompression */
1684 ret = rte_compressdev_stream_create(0,
1685 decompress_xforms[0], stream);
1688 "Decompression stream could not be created, error %d\n",
1693 /* Attach stream to ops */
1694 for (i = 0; i < num_bufs; i++)
1695 ops[i]->stream = *stream;
/* remember how many xforms were created, so the caller can free them */
1698 test_priv_data->num_priv_xforms = num_priv_xforms;
1706 * Prints out the test report. Memory freeing.
1708 * Called after successful decompression.
1709 * Operation(s) status validation and compression buffers freeing.
1711 * -1 returned if the function fails.
1714 * Interim data containing session/transformation objects.
1716 * The test parameters set by users (command line parameters).
1717 * @param test_priv_data
1718 * A container used for aggregating all the private test arrays.
1720 * - 2: Next step must be executed by the caller (stateful decompression only)
1721 * - 1: On success (caller should stop and exit)
1726 test_deflate_decomp_finalize(const struct interim_data_params *int_data,
1727 const struct test_data_params *test_data,
1728 const struct test_private_arrays *test_priv_data)
1730 /* local variables: */
1732 struct priv_op_data *priv_data;
/*
 * NOTE(review): 'step' is static, so the recoverable-out-of-space step
 * counter persists across calls (and across test cases) — presumably it
 * is reset somewhere not visible here; verify against the caller.
 */
1733 static unsigned int step;
1735 /* from int_data: */
1736 unsigned int num_bufs = int_data->num_bufs;
1737 const char * const *test_bufs = int_data->test_bufs;
1738 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1740 /* from test_priv_data: */
1741 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1742 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
1743 struct rte_comp_op **ops = test_priv_data->ops;
1744 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1745 unsigned int *decomp_produced_data_size =
1746 test_priv_data->decomp_produced_data_size;
1747 char **all_decomp_data = test_priv_data->all_decomp_data;
1749 /* from test_data: */
1750 unsigned int out_of_space = test_data->out_of_space;
1751 enum rte_comp_op_type operation_type = test_data->decompress_state;
1753 unsigned int zlib_compress =
1754 (test_data->zlib_dir == ZLIB_ALL ||
1755 test_data->zlib_dir == ZLIB_COMPRESS);
1756 unsigned int zlib_decompress =
1757 (test_data->zlib_dir == ZLIB_ALL ||
1758 test_data->zlib_dir == ZLIB_DECOMPRESS);
/* Debug report per buffer: decompressing engine and byte counts */
1760 for (i = 0; i < num_bufs; i++) {
1761 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1762 char engine[] = "zlib, (directly, no PMD)";
1764 strlcpy(engine, "pmd", sizeof(engine));
1765 RTE_LOG(DEBUG, USER1,
1766 "Buffer %u decompressed by %s from %u to %u bytes\n",
1768 ops_processed[i]->consumed, ops_processed[i]->produced);
1773 * Check operation status and free source mbuf (destination mbuf and
1774 * compress operation information is still needed)
1776 for (i = 0; i < num_bufs; i++) {
/* out-of-space tests EXPECT the PMD to report OUT_OF_SPACE_TERMINATED */
1777 if (out_of_space && !zlib_decompress) {
1778 if (ops_processed[i]->status !=
1779 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1782 "Operation without expected out of "
1783 "space status error\n");
/*
 * Stateful path: SUCCESS or OUT_OF_SPACE_RECOVERABLE means this chunk's
 * output is valid; accumulate it and, if input remains, rewind the op
 * to continue from where the PMD stopped (caller re-enqueues it).
 */
1789 if (operation_type == RTE_COMP_OP_STATEFUL
1790 && (ops_processed[i]->status ==
1791 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
1792 || ops_processed[i]->status ==
1793 RTE_COMP_OP_STATUS_SUCCESS)) {
1795 RTE_LOG(DEBUG, USER1,
1796 ".............RECOVERABLE\n");
1798 /* collect the output into all_decomp_data */
1799 const void *ptr = rte_pktmbuf_read(
1800 ops_processed[i]->m_dst,
1801 ops_processed[i]->dst.offset,
1802 ops_processed[i]->produced,
1804 *decomp_produced_data_size);
/* rte_pktmbuf_read() copies only for segmented mbufs; skip self-copy */
1805 if (ptr != *all_decomp_data +
1806 *decomp_produced_data_size)
1807 rte_memcpy(*all_decomp_data +
1808 *decomp_produced_data_size,
1809 ptr, ops_processed[i]->produced);
1811 *decomp_produced_data_size +=
1812 ops_processed[i]->produced;
1813 if (ops_processed[i]->src.length >
1814 ops_processed[i]->consumed) {
/* SUCCESS with unconsumed input is a PMD error, not recoverable */
1815 if (ops_processed[i]->status ==
1816 RTE_COMP_OP_STATUS_SUCCESS) {
1818 "Operation finished too early\n");
/* bound the number of resubmissions to avoid looping forever */
1822 if (step >= test_data->decompress_steps_max) {
1824 "Operation exceeded maximum steps\n");
1827 ops[i] = ops_processed[i];
1829 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1830 ops[i]->src.offset +=
1831 ops_processed[i]->consumed;
1832 ops[i]->src.length -=
1833 ops_processed[i]->consumed;
1834 /* repeat the operation */
1837 /* Compare the original stream with the */
1838 /* decompressed stream (in size and the data) */
1839 priv_data = (struct priv_op_data *)
1840 (ops_processed[i] + 1);
1842 test_bufs[priv_data->orig_idx];
1843 const char *buf2 = *all_decomp_data;
/* +1 includes the NUL terminator of the original test string */
1845 if (compare_buffers(buf1, strlen(buf1) + 1,
1846 buf2, *decomp_produced_data_size) < 0)
1848 /* Test checksums */
1849 if (compress_xforms[0]->compress.chksum
1850 != RTE_COMP_CHECKSUM_NONE) {
1851 if (ops_processed[i]->output_chksum
1852 != compress_checksum[i]) {
1854 "The checksums differ\n"
1855 "Compression Checksum: %" PRIu64 "\tDecompression "
1856 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1857 ops_processed[i]->output_chksum);
1862 } else if (ops_processed[i]->status !=
1863 RTE_COMP_OP_STATUS_SUCCESS) {
1865 "Decomp: Some operations were not successful, status = %u\n",
1866 ops_processed[i]->status);
/* compressed source mbufs are no longer needed after decompression */
1869 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1870 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1871 comp_bufs[priv_data->orig_idx] = NULL;
1874 if (out_of_space && !zlib_decompress)
1881 * Validation of the output (compression/decompression) data.
1883 * The function compares the source stream with the output stream,
1884 * after decompression, to check if compression/decompression
1886 * -1 returned if the function fails.
1889 * Interim data containing session/transformation objects.
1891 * The test parameters set by users (command line parameters).
1892 * @param test_priv_data
1893 * A container used for aggregating all the private test arrays.
1899 test_results_validation(const struct interim_data_params *int_data,
1900 const struct test_data_params *test_data,
1901 const struct test_private_arrays *test_priv_data)
1903 /* local variables: */
1905 struct priv_op_data *priv_data;
1908 char *contig_buf = NULL;
1911 /* from int_data: */
1912 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1913 unsigned int num_bufs = int_data->num_bufs;
1914 const char * const *test_bufs = int_data->test_bufs;
1916 /* from test_priv_data: */
1917 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1918 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1921 * Compare the original stream with the decompressed stream
1922 * (in size and the data)
1924 for (i = 0; i < num_bufs; i++) {
1925 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
/* reference data comes from the external memzone or the test buffers */
1926 buf1 = test_data->use_external_mbufs ?
1927 test_data->inbuf_memzone->addr :
1928 test_bufs[priv_data->orig_idx];
1929 data_size = test_data->use_external_mbufs ?
1930 test_data->inbuf_data_size :
/* linearize possibly-segmented output mbuf into one comparable buffer */
1933 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1934 if (contig_buf == NULL) {
1935 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1940 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1941 ops_processed[i]->produced, contig_buf);
1942 if (compare_buffers(buf1, data_size,
1943 buf2, ops_processed[i]->produced) < 0)
1946 /* Test checksums */
/* checksum of decompression output must match the one from compression */
1947 if (compress_xforms[0]->compress.chksum !=
1948 RTE_COMP_CHECKSUM_NONE) {
1949 if (ops_processed[i]->output_chksum !=
1950 compress_checksum[i]) {
1951 RTE_LOG(ERR, USER1, "The checksums differ\n"
1952 "Compression Checksum: %" PRIu64 "\tDecompression "
1953 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1954 ops_processed[i]->output_chksum);
/* freed on both success and error paths; rte_free(NULL) is a no-op */
1959 rte_free(contig_buf);
1965 rte_free(contig_buf);
1970 * Compresses and decompresses input stream with compressdev API and Zlib API
1972 * Basic test function. Common for all the functional tests.
1973 * -1 returned if function fail.
1976 * Interim data containing session/transformation objects.
1978 * The test parameters set by users (command line parameters).
1980 * - 1: Some operation not supported
1986 test_deflate_comp_decomp(const struct interim_data_params *int_data,
1987 const struct test_data_params *test_data)
1989 unsigned int num_bufs = int_data->num_bufs;
1990 unsigned int out_of_space = test_data->out_of_space;
1992 void *stream = NULL;
1993 char *all_decomp_data = NULL;
1994 unsigned int decomp_produced_data_size = 0;
1996 int ret_status = -1;
/* per-buffer working arrays, sized by the number of test buffers (VLAs) */
1998 struct rte_mbuf *uncomp_bufs[num_bufs];
1999 struct rte_mbuf *comp_bufs[num_bufs];
2000 struct rte_comp_op *ops[num_bufs];
2001 struct rte_comp_op *ops_processed[num_bufs];
2002 void *priv_xforms[num_bufs];
2005 uint64_t compress_checksum[num_bufs];
2006 uint32_t compressed_data_size[num_bufs];
2007 char *contig_buf = NULL;
2009 struct rte_mbuf_ext_shared_info compbuf_info;
2010 struct rte_mbuf_ext_shared_info decompbuf_info;
2012 const struct rte_compressdev_capabilities *capa;
2014 /* Compressing with CompressDev */
2015 unsigned int zlib_compress =
2016 (test_data->zlib_dir == ZLIB_ALL ||
2017 test_data->zlib_dir == ZLIB_COMPRESS);
2018 unsigned int zlib_decompress =
2019 (test_data->zlib_dir == ZLIB_ALL ||
2020 test_data->zlib_dir == ZLIB_DECOMPRESS);
/* bundle the local arrays so the stage helpers share one state object */
2022 struct test_private_arrays test_priv_data;
2024 test_priv_data.uncomp_bufs = uncomp_bufs;
2025 test_priv_data.comp_bufs = comp_bufs;
2026 test_priv_data.ops = ops;
2027 test_priv_data.ops_processed = ops_processed;
2028 test_priv_data.priv_xforms = priv_xforms;
2029 test_priv_data.compress_checksum = compress_checksum;
2030 test_priv_data.compressed_data_size = compressed_data_size;
2032 test_priv_data.stream = &stream;
2033 test_priv_data.all_decomp_data = &all_decomp_data;
2034 test_priv_data.decomp_produced_data_size = &decomp_produced_data_size;
2036 test_priv_data.num_priv_xforms = 0; /* it's used for decompression only */
2038 capa = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2041 "Compress device does not support DEFLATE\n");
2045 /* Prepare the source mbufs with the data */
2046 ret = test_setup_com_bufs(int_data, test_data, &test_priv_data);
2052 RTE_LOG(DEBUG, USER1, "<<< COMPRESSION >>>\n");
2056 /* Prepare output (destination) mbufs for compressed data */
2057 ret = test_setup_output_bufs(
2058 OPERATION_COMPRESSION,
2059 out_of_space == 1 && !zlib_compress,
2069 /* Run compression */
2070 ret = test_deflate_comp_run(int_data, test_data, &test_priv_data);
/* finalize: 0 = continue to decompression, 1 = stop OK, 2 = unsupported */
2076 ret = test_deflate_comp_finalize(int_data, test_data, &test_priv_data);
2080 } else if (ret == 1) {
2083 } else if (ret == 2) {
2084 ret_status = 1; /* some operation not supported */
2090 RTE_LOG(DEBUG, USER1, "<<< DECOMPRESSION >>>\n");
2092 /* Prepare output (destination) mbufs for decompressed data */
2093 ret = test_setup_output_bufs(
2094 OPERATION_DECOMPRESSION,
2095 out_of_space == 1 && !zlib_decompress,
2105 /* Run decompression */
2106 ret = test_deflate_decomp_run(int_data, test_data, &test_priv_data);
2112 if (!zlib_decompress) {
/* stateful decompression loops back here until all input is consumed */
2113 next_step: /* next step for stateful decompression only */
2114 ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs);
2118 "Decompression: enqueue/dequeue operation failed\n");
2122 ret = test_deflate_decomp_finalize(int_data, test_data, &test_priv_data);
2126 } else if (ret == 1) {
2129 } else if (ret == 2) {
2133 /* FINAL PROCESSING */
2135 ret = test_results_validation(int_data, test_data, &test_priv_data);
2143 /* Free resources */
/* common cleanup path: release stream, buffers, xforms and operations */
2146 rte_compressdev_stream_free(0, stream);
2147 rte_free(all_decomp_data);
2149 /* Free compress private xforms */
2150 for (i = 0; i < test_priv_data.num_priv_xforms; i++) {
2151 if (priv_xforms[i] != NULL) {
2152 rte_compressdev_private_xform_free(0, priv_xforms[i]);
2153 priv_xforms[i] = NULL;
2156 for (i = 0; i < num_bufs; i++) {
2157 rte_pktmbuf_free(uncomp_bufs[i]);
2158 rte_pktmbuf_free(comp_bufs[i]);
2159 rte_comp_op_free(ops[i]);
2160 rte_comp_op_free(ops_processed[i]);
2162 rte_free(contig_buf);
2168 test_compressdev_deflate_stateless_fixed(void)
2170 struct comp_testsuite_params *ts_params = &testsuite_params;
2173 const struct rte_compressdev_capabilities *capab;
2175 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2176 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* skip the test when the device cannot do fixed-huffman deflate */
2178 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2181 struct rte_comp_xform *compress_xform =
2182 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2184 if (compress_xform == NULL) {
2186 "Compress xform could not be created\n");
/* clone the default xform, then force fixed huffman encoding */
2191 memcpy(compress_xform, ts_params->def_comp_xform,
2192 sizeof(struct rte_comp_xform));
2193 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
2195 struct interim_data_params int_data = {
2200 &ts_params->def_decomp_xform,
2204 struct test_data_params test_data = {
2205 .compress_state = RTE_COMP_OP_STATELESS,
2206 .decompress_state = RTE_COMP_OP_STATELESS,
2207 .buff_type = LB_BOTH,
2208 .zlib_dir = ZLIB_DECOMPRESS,
2211 .overflow = OVERFLOW_DISABLED,
2212 .ratio = RATIO_ENABLED
/* run every test buffer in both directions against zlib */
2215 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2216 int_data.test_bufs = &compress_test_bufs[i];
2217 int_data.buf_idx = &i;
2219 /* Compress with compressdev, decompress with Zlib */
2220 test_data.zlib_dir = ZLIB_DECOMPRESS;
2221 ret = test_deflate_comp_decomp(&int_data, &test_data);
2225 /* Compress with Zlib, decompress with compressdev */
2226 test_data.zlib_dir = ZLIB_COMPRESS;
2227 ret = test_deflate_comp_decomp(&int_data, &test_data);
2235 rte_free(compress_xform);
2240 test_compressdev_deflate_stateless_dynamic(void)
2242 struct comp_testsuite_params *ts_params = &testsuite_params;
2245 struct rte_comp_xform *compress_xform =
2246 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2248 const struct rte_compressdev_capabilities *capab;
2250 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2251 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* skip the test when the device cannot do dynamic-huffman deflate */
2253 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2256 if (compress_xform == NULL) {
2258 "Compress xform could not be created\n");
/* clone the default xform, then force dynamic huffman encoding */
2263 memcpy(compress_xform, ts_params->def_comp_xform,
2264 sizeof(struct rte_comp_xform));
2265 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
2267 struct interim_data_params int_data = {
2272 &ts_params->def_decomp_xform,
2276 struct test_data_params test_data = {
2277 .compress_state = RTE_COMP_OP_STATELESS,
2278 .decompress_state = RTE_COMP_OP_STATELESS,
2279 .buff_type = LB_BOTH,
2280 .zlib_dir = ZLIB_DECOMPRESS,
2283 .overflow = OVERFLOW_DISABLED,
2284 .ratio = RATIO_ENABLED
/* run every test buffer in both directions against zlib */
2287 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2288 int_data.test_bufs = &compress_test_bufs[i];
2289 int_data.buf_idx = &i;
2291 /* Compress with compressdev, decompress with Zlib */
2292 test_data.zlib_dir = ZLIB_DECOMPRESS;
2293 ret = test_deflate_comp_decomp(&int_data, &test_data);
2297 /* Compress with Zlib, decompress with compressdev */
2298 test_data.zlib_dir = ZLIB_COMPRESS;
2299 ret = test_deflate_comp_decomp(&int_data, &test_data);
2307 rte_free(compress_xform);
2312 test_compressdev_deflate_stateless_multi_op(void)
2314 struct comp_testsuite_params *ts_params = &testsuite_params;
/* submit ALL test buffers in one batch to exercise multi-op bursts */
2315 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
2316 uint16_t buf_idx[num_bufs];
2320 for (i = 0; i < num_bufs; i++)
2323 struct interim_data_params int_data = {
2327 &ts_params->def_comp_xform,
2328 &ts_params->def_decomp_xform,
2332 struct test_data_params test_data = {
2333 .compress_state = RTE_COMP_OP_STATELESS,
2334 .decompress_state = RTE_COMP_OP_STATELESS,
2335 .buff_type = LB_BOTH,
2336 .zlib_dir = ZLIB_DECOMPRESS,
2339 .overflow = OVERFLOW_DISABLED,
2340 .ratio = RATIO_ENABLED
2343 /* Compress with compressdev, decompress with Zlib */
2344 test_data.zlib_dir = ZLIB_DECOMPRESS;
2345 ret = test_deflate_comp_decomp(&int_data, &test_data);
2349 /* Compress with Zlib, decompress with compressdev */
2350 test_data.zlib_dir = ZLIB_COMPRESS;
2351 ret = test_deflate_comp_decomp(&int_data, &test_data);
2355 return TEST_SUCCESS;
2359 test_compressdev_deflate_stateless_multi_level(void)
2361 struct comp_testsuite_params *ts_params = &testsuite_params;
2365 struct rte_comp_xform *compress_xform =
2366 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2368 if (compress_xform == NULL) {
2370 "Compress xform could not be created\n");
/* start from the default xform; level is overridden in the loop below */
2375 memcpy(compress_xform, ts_params->def_comp_xform,
2376 sizeof(struct rte_comp_xform));
2378 struct interim_data_params int_data = {
2383 &ts_params->def_decomp_xform,
2387 struct test_data_params test_data = {
2388 .compress_state = RTE_COMP_OP_STATELESS,
2389 .decompress_state = RTE_COMP_OP_STATELESS,
2390 .buff_type = LB_BOTH,
2391 .zlib_dir = ZLIB_DECOMPRESS,
2394 .overflow = OVERFLOW_DISABLED,
2395 .ratio = RATIO_ENABLED
/* every test buffer is compressed at every supported level */
2398 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2399 int_data.test_bufs = &compress_test_bufs[i];
2400 int_data.buf_idx = &i;
2402 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
2404 compress_xform->compress.level = level;
2405 /* Compress with compressdev, decompress with Zlib */
2406 test_data.zlib_dir = ZLIB_DECOMPRESS;
2407 ret = test_deflate_comp_decomp(&int_data, &test_data);
2416 rte_free(compress_xform);
2420 #define NUM_XFORMS 3
2422 test_compressdev_deflate_stateless_multi_xform(void)
2424 struct comp_testsuite_params *ts_params = &testsuite_params;
2425 uint16_t num_bufs = NUM_XFORMS;
2426 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
2427 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
2428 const char *test_buffers[NUM_XFORMS];
2430 unsigned int level = RTE_COMP_LEVEL_MIN;
2431 uint16_t buf_idx[num_bufs];
2434 /* Create multiple xforms with various levels */
2435 for (i = 0; i < NUM_XFORMS; i++) {
2436 compress_xforms[i] = rte_malloc(NULL,
2437 sizeof(struct rte_comp_xform), 0);
2438 if (compress_xforms[i] == NULL) {
2440 "Compress xform could not be created\n");
/* each compress xform is a copy of the default with its own level */
2445 memcpy(compress_xforms[i], ts_params->def_comp_xform,
2446 sizeof(struct rte_comp_xform));
2447 compress_xforms[i]->compress.level = level;
2450 decompress_xforms[i] = rte_malloc(NULL,
2451 sizeof(struct rte_comp_xform), 0);
2452 if (decompress_xforms[i] == NULL) {
2454 "Decompress xform could not be created\n");
2459 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
2460 sizeof(struct rte_comp_xform));
2463 for (i = 0; i < NUM_XFORMS; i++) {
2465 /* Use the same buffer in all sessions */
2466 test_buffers[i] = compress_test_bufs[0];
2469 struct interim_data_params int_data = {
2478 struct test_data_params test_data = {
2479 .compress_state = RTE_COMP_OP_STATELESS,
2480 .decompress_state = RTE_COMP_OP_STATELESS,
2481 .buff_type = LB_BOTH,
2482 .zlib_dir = ZLIB_DECOMPRESS,
2485 .overflow = OVERFLOW_DISABLED,
2486 .ratio = RATIO_ENABLED
2489 /* Compress with compressdev, decompress with Zlib */
2490 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* cleanup path frees all xforms (rte_free(NULL) is a safe no-op) */
2497 for (i = 0; i < NUM_XFORMS; i++) {
2498 rte_free(compress_xforms[i]);
2499 rte_free(decompress_xforms[i]);
2506 test_compressdev_deflate_stateless_sgl(void)
2508 struct comp_testsuite_params *ts_params = &testsuite_params;
2511 const struct rte_compressdev_capabilities *capab;
2513 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2514 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* baseline requirement: scatter-gather input AND output support */
2516 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2519 struct interim_data_params int_data = {
2523 &ts_params->def_comp_xform,
2524 &ts_params->def_decomp_xform,
2528 struct test_data_params test_data = {
2529 .compress_state = RTE_COMP_OP_STATELESS,
2530 .decompress_state = RTE_COMP_OP_STATELESS,
2531 .buff_type = SGL_BOTH,
2532 .zlib_dir = ZLIB_DECOMPRESS,
2535 .overflow = OVERFLOW_DISABLED,
2536 .ratio = RATIO_ENABLED
/* SGL-in/SGL-out, both directions, for every test buffer */
2539 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2540 int_data.test_bufs = &compress_test_bufs[i];
2541 int_data.buf_idx = &i;
2543 /* Compress with compressdev, decompress with Zlib */
2544 test_data.zlib_dir = ZLIB_DECOMPRESS;
2545 ret = test_deflate_comp_decomp(&int_data, &test_data);
2549 /* Compress with Zlib, decompress with compressdev */
2550 test_data.zlib_dir = ZLIB_COMPRESS;
2551 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* optionally also cover SGL input with linear-buffer output */
2555 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
2556 /* Compress with compressdev, decompress with Zlib */
2557 test_data.zlib_dir = ZLIB_DECOMPRESS;
2558 test_data.buff_type = SGL_TO_LB;
2559 ret = test_deflate_comp_decomp(&int_data, &test_data);
2563 /* Compress with Zlib, decompress with compressdev */
2564 test_data.zlib_dir = ZLIB_COMPRESS;
2565 test_data.buff_type = SGL_TO_LB;
2566 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* optionally also cover linear-buffer input with SGL output */
2571 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
2572 /* Compress with compressdev, decompress with Zlib */
2573 test_data.zlib_dir = ZLIB_DECOMPRESS;
2574 test_data.buff_type = LB_TO_SGL;
2575 ret = test_deflate_comp_decomp(&int_data, &test_data);
2579 /* Compress with Zlib, decompress with compressdev */
2580 test_data.zlib_dir = ZLIB_COMPRESS;
2581 test_data.buff_type = LB_TO_SGL;
2582 ret = test_deflate_comp_decomp(&int_data, &test_data);
2588 return TEST_SUCCESS;
/*
 * Stateless compress/decompress test exercising every checksum mode the
 * device advertises (CRC32, Adler32, combined CRC32+Adler32).  For each
 * supported mode it runs every buffer in compress_test_bufs twice: once
 * with zlib generating the reference checksum (ZLIB_COMPRESS) and once
 * fully on the device (ZLIB_NONE).
 * NOTE(review): this extract is missing original lines (early-return on
 * "no checksum support", int_data/test_data fields, error gotos and the
 * final return); code below is byte-identical to the extract, comments only.
 */
2592 test_compressdev_deflate_stateless_checksum(void)
2594 struct comp_testsuite_params *ts_params = &testsuite_params;
2597 const struct rte_compressdev_capabilities *capab;
2599 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2600 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2602 /* Check if driver supports any checksum */
2603 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
2604 (capab->comp_feature_flags &
2605 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
2606 (capab->comp_feature_flags &
2607 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
/* Private copies of the default xforms so chksum can be mutated per mode. */
2610 struct rte_comp_xform *compress_xform =
2611 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2612 if (compress_xform == NULL) {
2613 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2617 memcpy(compress_xform, ts_params->def_comp_xform,
2618 sizeof(struct rte_comp_xform));
2620 struct rte_comp_xform *decompress_xform =
2621 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2622 if (decompress_xform == NULL) {
2623 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2624 rte_free(compress_xform);
2628 memcpy(decompress_xform, ts_params->def_decomp_xform,
2629 sizeof(struct rte_comp_xform));
2631 struct interim_data_params int_data = {
2640 struct test_data_params test_data = {
2641 .compress_state = RTE_COMP_OP_STATELESS,
2642 .decompress_state = RTE_COMP_OP_STATELESS,
2643 .buff_type = LB_BOTH,
2644 .zlib_dir = ZLIB_DECOMPRESS,
2647 .overflow = OVERFLOW_DISABLED,
2648 .ratio = RATIO_ENABLED
2651 /* Check if driver supports crc32 checksum and test */
2652 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
2653 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2654 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2656 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2657 /* Compress with compressdev, decompress with Zlib */
2658 int_data.test_bufs = &compress_test_bufs[i];
2659 int_data.buf_idx = &i;
2661 /* Generate zlib checksum and test against selected
2662 * drivers decompression checksum
2664 test_data.zlib_dir = ZLIB_COMPRESS;
2665 ret = test_deflate_comp_decomp(&int_data, &test_data);
2669 /* Generate compression and decompression
2670 * checksum of selected driver
2672 test_data.zlib_dir = ZLIB_NONE;
2673 ret = test_deflate_comp_decomp(&int_data, &test_data);
2679 /* Check if driver supports adler32 checksum and test */
2680 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
2681 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2682 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2684 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2685 int_data.test_bufs = &compress_test_bufs[i];
2686 int_data.buf_idx = &i;
2688 /* Generate zlib checksum and test against selected
2689 * drivers decompression checksum
2691 test_data.zlib_dir = ZLIB_COMPRESS;
2692 ret = test_deflate_comp_decomp(&int_data, &test_data);
2695 /* Generate compression and decompression
2696 * checksum of selected driver
2698 test_data.zlib_dir = ZLIB_NONE;
2699 ret = test_deflate_comp_decomp(&int_data, &test_data);
2705 /* Check if driver supports combined crc and adler checksum and test */
2706 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
2707 compress_xform->compress.chksum =
2708 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2709 decompress_xform->decompress.chksum =
2710 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2712 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2713 int_data.test_bufs = &compress_test_bufs[i];
2714 int_data.buf_idx = &i;
/* zlib cannot produce a combined CRC32+Adler32, so device-only here. */
2716 /* Generate compression and decompression
2717 * checksum of selected driver
2719 test_data.zlib_dir = ZLIB_NONE;
2720 ret = test_deflate_comp_decomp(&int_data, &test_data);
2729 rte_free(compress_xform);
2730 rte_free(decompress_xform);
/*
 * Negative test: runs stateless comp/decomp with .out_of_space = 1 so the
 * destination buffer is deliberately undersized; the helper is expected to
 * report the out-of-space condition rather than succeed.  Repeated for
 * LB_BOTH and, when supported, SGL_BOTH buffer layouts.
 * NOTE(review): extract has gaps (skip on missing fixed-Huffman support,
 * error handling, final return absent); comments only, code untouched.
 */
2735 test_compressdev_out_of_space_buffer(void)
2737 struct comp_testsuite_params *ts_params = &testsuite_params;
2740 const struct rte_compressdev_capabilities *capab;
2742 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
2744 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2745 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2747 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2750 struct interim_data_params int_data = {
2751 &compress_test_bufs[0],
2754 &ts_params->def_comp_xform,
2755 &ts_params->def_decomp_xform,
2759 struct test_data_params test_data = {
2760 .compress_state = RTE_COMP_OP_STATELESS,
2761 .decompress_state = RTE_COMP_OP_STATELESS,
2762 .buff_type = LB_BOTH,
2763 .zlib_dir = ZLIB_DECOMPRESS,
2764 .out_of_space = 1, /* run out-of-space test */
2766 .overflow = OVERFLOW_DISABLED,
2767 .ratio = RATIO_ENABLED
2769 /* Compress with compressdev, decompress with Zlib */
2770 test_data.zlib_dir = ZLIB_DECOMPRESS;
2771 ret = test_deflate_comp_decomp(&int_data, &test_data);
2775 /* Compress with Zlib, decompress with compressdev */
2776 test_data.zlib_dir = ZLIB_COMPRESS;
2777 ret = test_deflate_comp_decomp(&int_data, &test_data);
2781 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2782 /* Compress with compressdev, decompress with Zlib */
2783 test_data.zlib_dir = ZLIB_DECOMPRESS;
2784 test_data.buff_type = SGL_BOTH;
2785 ret = test_deflate_comp_decomp(&int_data, &test_data);
2789 /* Compress with Zlib, decompress with compressdev */
2790 test_data.zlib_dir = ZLIB_COMPRESS;
2791 test_data.buff_type = SGL_BOTH;
2792 ret = test_deflate_comp_decomp(&int_data, &test_data);
/*
 * Stateless big-data test: BIG_DATA_TEST_SIZE bytes of pseudo-random,
 * non-zero data (so it is poorly compressible -> RATIO_DISABLED) pushed
 * through SGL buffers with dynamic Huffman, both directions vs zlib.
 * srand() is seeded with the buffer size so the data is reproducible.
 * Requires HUFFMAN_DYNAMIC and OOP_SGL_IN_SGL_OUT capabilities.
 * NOTE(review): extract has gaps (skip paths, error gotos, final return
 * missing); comments only, code untouched.
 */
2804 test_compressdev_deflate_stateless_dynamic_big(void)
2806 struct comp_testsuite_params *ts_params = &testsuite_params;
2810 const struct rte_compressdev_capabilities *capab;
2811 char *test_buffer = NULL;
2813 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2814 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2816 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2819 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2822 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
2823 if (test_buffer == NULL) {
2825 "Can't allocate buffer for big-data\n");
2829 struct interim_data_params int_data = {
2830 (const char * const *)&test_buffer,
2833 &ts_params->def_comp_xform,
2834 &ts_params->def_decomp_xform,
2838 struct test_data_params test_data = {
2839 .compress_state = RTE_COMP_OP_STATELESS,
2840 .decompress_state = RTE_COMP_OP_STATELESS,
2841 .buff_type = SGL_BOTH,
2842 .zlib_dir = ZLIB_DECOMPRESS,
2845 .overflow = OVERFLOW_DISABLED,
2846 .ratio = RATIO_DISABLED
2849 ts_params->def_comp_xform->compress.deflate.huffman =
2850 RTE_COMP_HUFFMAN_DYNAMIC;
2852 /* fill the buffer with data based on rand. data */
2853 srand(BIG_DATA_TEST_SIZE);
/* "| 1" keeps every byte non-zero; buffer is NUL-terminated at the end. */
2854 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2855 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2856 test_buffer[BIG_DATA_TEST_SIZE - 1] = 0;
2858 /* Compress with compressdev, decompress with Zlib */
2859 test_data.zlib_dir = ZLIB_DECOMPRESS;
2860 ret = test_deflate_comp_decomp(&int_data, &test_data);
2864 /* Compress with Zlib, decompress with compressdev */
2865 test_data.zlib_dir = ZLIB_COMPRESS;
2866 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Restore the shared default xform before returning. */
2873 ts_params->def_comp_xform->compress.deflate.huffman =
2874 RTE_COMP_HUFFMAN_DEFAULT;
2875 rte_free(test_buffer);
/*
 * Stateful-decompression test: compress with zlib, then decompress on the
 * device statefully in at most 4 steps with a 2000-byte output block per
 * step.  Repeated with SGL buffers when OOP_SGL_IN_SGL_OUT is supported.
 * Requires RTE_COMP_FF_STATEFUL_DECOMPRESSION.
 * NOTE(review): extract has gaps (failure branches, final return missing);
 * comments only, code untouched.
 */
2880 test_compressdev_deflate_stateful_decomp(void)
2882 struct comp_testsuite_params *ts_params = &testsuite_params;
2885 const struct rte_compressdev_capabilities *capab;
2887 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2888 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2890 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2893 struct interim_data_params int_data = {
2894 &compress_test_bufs[0],
2897 &ts_params->def_comp_xform,
2898 &ts_params->def_decomp_xform,
2902 struct test_data_params test_data = {
2903 .compress_state = RTE_COMP_OP_STATELESS,
2904 .decompress_state = RTE_COMP_OP_STATEFUL,
2905 .buff_type = LB_BOTH,
2906 .zlib_dir = ZLIB_COMPRESS,
2909 .decompress_output_block_size = 2000,
2910 .decompress_steps_max = 4,
2911 .overflow = OVERFLOW_DISABLED,
2912 .ratio = RATIO_ENABLED
2915 /* Compress with Zlib, decompress with compressdev */
2916 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2921 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2922 /* Now test with SGL buffers */
2923 test_data.buff_type = SGL_BOTH;
2924 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/*
 * Stateful decompression combined with checksum verification: for each
 * checksum mode the device supports (CRC32, Adler32, combined) run a
 * zlib-compress / device-stateful-decompress cycle with 2000-byte output
 * steps (max 4), first with linear buffers, then with SGL when supported.
 * The combined CRC32+Adler32 case uses ZLIB_NONE because zlib cannot
 * generate that checksum.  Requires STATEFUL_DECOMPRESSION plus at least
 * one checksum capability.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error gotos
 * and final return missing); comments only, code untouched.
 */
2937 test_compressdev_deflate_stateful_decomp_checksum(void)
2939 struct comp_testsuite_params *ts_params = &testsuite_params;
2942 const struct rte_compressdev_capabilities *capab;
2944 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2945 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2947 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2950 /* Check if driver supports any checksum */
2951 if (!(capab->comp_feature_flags &
2952 (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
2953 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
/* Private copies of the default xforms so chksum can be mutated per mode. */
2956 struct rte_comp_xform *compress_xform =
2957 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2958 if (compress_xform == NULL) {
2959 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2963 memcpy(compress_xform, ts_params->def_comp_xform,
2964 sizeof(struct rte_comp_xform));
2966 struct rte_comp_xform *decompress_xform =
2967 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2968 if (decompress_xform == NULL) {
2969 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2970 rte_free(compress_xform);
2974 memcpy(decompress_xform, ts_params->def_decomp_xform,
2975 sizeof(struct rte_comp_xform));
2977 struct interim_data_params int_data = {
2978 &compress_test_bufs[0],
2986 struct test_data_params test_data = {
2987 .compress_state = RTE_COMP_OP_STATELESS,
2988 .decompress_state = RTE_COMP_OP_STATEFUL,
2989 .buff_type = LB_BOTH,
2990 .zlib_dir = ZLIB_COMPRESS,
2993 .decompress_output_block_size = 2000,
2994 .decompress_steps_max = 4,
2995 .overflow = OVERFLOW_DISABLED,
2996 .ratio = RATIO_ENABLED
2999 /* Check if driver supports crc32 checksum and test */
3000 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
3001 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
3002 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
3003 /* Compress with Zlib, decompress with compressdev */
3004 test_data.buff_type = LB_BOTH;
3005 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3009 if (capab->comp_feature_flags &
3010 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
3011 /* Now test with SGL buffers */
3012 test_data.buff_type = SGL_BOTH;
3013 if (test_deflate_comp_decomp(&int_data,
3021 /* Check if driver supports adler32 checksum and test */
3022 if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
3023 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
3024 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
3025 /* Compress with Zlib, decompress with compressdev */
3026 test_data.buff_type = LB_BOTH;
3027 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3031 if (capab->comp_feature_flags &
3032 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
3033 /* Now test with SGL buffers */
3034 test_data.buff_type = SGL_BOTH;
3035 if (test_deflate_comp_decomp(&int_data,
3043 /* Check if driver supports combined crc and adler checksum and test */
3044 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
3045 compress_xform->compress.chksum =
3046 RTE_COMP_CHECKSUM_CRC32_ADLER32;
3047 decompress_xform->decompress.chksum =
3048 RTE_COMP_CHECKSUM_CRC32_ADLER32;
3049 /* Zlib doesn't support combined checksum */
3050 test_data.zlib_dir = ZLIB_NONE;
3051 /* Compress stateless, decompress stateful with compressdev */
3052 test_data.buff_type = LB_BOTH;
3053 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3057 if (capab->comp_feature_flags &
3058 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
3059 /* Now test with SGL buffers */
3060 test_data.buff_type = SGL_BOTH;
3061 if (test_deflate_comp_decomp(&int_data,
3072 rte_free(compress_xform);
3073 rte_free(decompress_xform);
/*
 * Look up or create an IOVA-contiguous, cache-line-aligned memzone named
 * "<name>_<socket_id>".  An existing zone of the same name is reused when
 * its length matches; otherwise it is freed and a fresh one reserved.
 * NOTE(review): extract is missing lines (presumably memzone = NULL after
 * the free, and the final return) — comments only, code untouched.
 */
3077 static const struct rte_memzone *
3078 make_memzone(const char *name, size_t size)
3080 unsigned int socket_id = rte_socket_id();
3081 char mz_name[RTE_MEMZONE_NAMESIZE];
3082 const struct rte_memzone *memzone;
3084 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id);
3085 memzone = rte_memzone_lookup(mz_name);
/* Size mismatch: drop the stale zone so it can be re-reserved below. */
3086 if (memzone != NULL && memzone->len != size) {
3087 rte_memzone_free(memzone);
3090 if (memzone == NULL) {
3091 memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id,
3092 RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
3093 if (memzone == NULL)
3094 RTE_LOG(ERR, USER1, "Can't allocate memory zone %s",
/*
 * Runs the stateless comp/decomp cycle with externally-attached mbuf
 * buffers (.use_external_mbufs = 1): input, compressed and decompressed
 * data live in three memzones sized for the largest test buffer (the
 * compress zone scaled by COMPRESS_BUF_SIZE_RATIO).  Each test buffer is
 * copied into the input memzone and exercised in both directions vs zlib.
 * NOTE(review): extract has gaps (int_data fields, error/cleanup jumps,
 * final return missing); comments only, code untouched.
 */
3101 test_compressdev_external_mbufs(void)
3103 struct comp_testsuite_params *ts_params = &testsuite_params;
3104 size_t data_len = 0;
3106 int ret = TEST_FAILED;
/* Size the shared memzones for the longest test string (incl. NUL). */
3108 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
3109 data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1);
3111 struct interim_data_params int_data = {
3115 &ts_params->def_comp_xform,
3116 &ts_params->def_decomp_xform,
3120 struct test_data_params test_data = {
3121 .compress_state = RTE_COMP_OP_STATELESS,
3122 .decompress_state = RTE_COMP_OP_STATELESS,
3123 .buff_type = LB_BOTH,
3124 .zlib_dir = ZLIB_DECOMPRESS,
3127 .use_external_mbufs = 1,
3128 .inbuf_data_size = data_len,
3129 .inbuf_memzone = make_memzone("inbuf", data_len),
3130 .compbuf_memzone = make_memzone("compbuf", data_len *
3131 COMPRESS_BUF_SIZE_RATIO),
3132 .uncompbuf_memzone = make_memzone("decompbuf", data_len),
3133 .overflow = OVERFLOW_DISABLED
3136 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
3137 /* prepare input data */
3138 data_len = strlen(compress_test_bufs[i]) + 1;
3139 rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i],
3141 test_data.inbuf_data_size = data_len;
3142 int_data.buf_idx = &i;
3144 /* Compress with compressdev, decompress with Zlib */
3145 test_data.zlib_dir = ZLIB_DECOMPRESS;
3146 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
3149 /* Compress with Zlib, decompress with compressdev */
3150 test_data.zlib_dir = ZLIB_COMPRESS;
3151 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
/* Cleanup: memzones released whether the loop succeeded or bailed out. */
3158 rte_memzone_free(test_data.inbuf_memzone);
3159 rte_memzone_free(test_data.compbuf_memzone);
3160 rte_memzone_free(test_data.uncompbuf_memzone);
/*
 * Stateless fixed-Huffman test with OVERFLOW_ENABLED: every test buffer is
 * run in both directions vs zlib; a negative helper result is a failure,
 * while a positive result is handled separately (presumably the recoverable
 * out-of-space outcome this test targets — TODO confirm, the branch bodies
 * are missing from this extract).
 * NOTE(review): extract has gaps (int_data fields, branch bodies, final
 * return missing); comments only, code untouched.
 */
3165 test_compressdev_deflate_stateless_fixed_oos_recoverable(void)
3167 struct comp_testsuite_params *ts_params = &testsuite_params;
3171 const struct rte_compressdev_capabilities *capab;
3173 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3174 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3176 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
/* Private copy of the default xform, switched to fixed Huffman. */
3179 struct rte_comp_xform *compress_xform =
3180 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
3182 if (compress_xform == NULL) {
3184 "Compress xform could not be created\n");
3189 memcpy(compress_xform, ts_params->def_comp_xform,
3190 sizeof(struct rte_comp_xform));
3191 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
3193 struct interim_data_params int_data = {
3198 &ts_params->def_decomp_xform,
3202 struct test_data_params test_data = {
3203 .compress_state = RTE_COMP_OP_STATELESS,
3204 .decompress_state = RTE_COMP_OP_STATELESS,
3205 .buff_type = LB_BOTH,
3206 .zlib_dir = ZLIB_DECOMPRESS,
3209 .overflow = OVERFLOW_ENABLED,
3210 .ratio = RATIO_ENABLED
3213 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
3214 int_data.test_bufs = &compress_test_bufs[i];
3215 int_data.buf_idx = &i;
3217 /* Compress with compressdev, decompress with Zlib */
3218 test_data.zlib_dir = ZLIB_DECOMPRESS;
3219 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
3220 if (comp_result < 0) {
3223 } else if (comp_result > 0) {
3228 /* Compress with Zlib, decompress with compressdev */
3229 test_data.zlib_dir = ZLIB_COMPRESS;
3230 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
3231 if (comp_result < 0) {
3234 } else if (comp_result > 0) {
3243 rte_free(compress_xform);
/*
 * QAT intermediate-buffer test, linear buffers, 1 operation: compress and
 * decompress IM_BUF_DATA_TEST_SIZE_LB bytes of reproducible pseudo-random
 * data entirely on the device (ZLIB_NONE), dynamic Huffman, ratio checks
 * disabled (data is incompressible by construction).
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3248 test_compressdev_deflate_im_buffers_LB_1op(void)
3250 struct comp_testsuite_params *ts_params = &testsuite_params;
3252 int ret = TEST_SUCCESS;
3254 const struct rte_compressdev_capabilities *capab;
3255 char *test_buffer = NULL;
3257 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3258 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3260 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3263 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3266 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3267 if (test_buffer == NULL) {
3269 "Can't allocate buffer for 'im buffer' test\n");
3273 struct interim_data_params int_data = {
3274 (const char * const *)&test_buffer,
3277 &ts_params->def_comp_xform,
3278 &ts_params->def_decomp_xform,
3282 struct test_data_params test_data = {
3283 .compress_state = RTE_COMP_OP_STATELESS,
3284 .decompress_state = RTE_COMP_OP_STATELESS,
3285 /* must be LB to SGL,
3286 * input LB buffer reaches its maximum,
3287 * if ratio 1.3 than another mbuf must be
3288 * created and attached
3290 .buff_type = LB_BOTH,
3291 .zlib_dir = ZLIB_NONE,
3294 .overflow = OVERFLOW_DISABLED,
3295 .ratio = RATIO_DISABLED
3298 ts_params->def_comp_xform->compress.deflate.huffman =
3299 RTE_COMP_HUFFMAN_DYNAMIC;
3301 /* fill the buffer with data based on rand. data */
3302 srand(IM_BUF_DATA_TEST_SIZE_LB);
3303 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3304 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3306 /* Compress with compressdev, decompress with compressdev */
3307 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3313 ts_params->def_comp_xform->compress.deflate.huffman =
3314 RTE_COMP_HUFFMAN_DEFAULT;
3315 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, linear buffers, 2 operations with the
 * large random buffer FIRST (op 0) and a small standard test string
 * second.  Device-only round trip (ZLIB_NONE), dynamic Huffman.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3320 test_compressdev_deflate_im_buffers_LB_2ops_first(void)
3322 struct comp_testsuite_params *ts_params = &testsuite_params;
3324 int ret = TEST_SUCCESS;
3326 const struct rte_compressdev_capabilities *capab;
3327 char *test_buffer = NULL;
3328 const char *test_buffers[2];
3330 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3331 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3333 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3336 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3339 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3340 if (test_buffer == NULL) {
3342 "Can't allocate buffer for 'im buffer' test\n");
/* Large random buffer in slot 0, small known string in slot 1. */
3346 test_buffers[0] = test_buffer;
3347 test_buffers[1] = compress_test_bufs[0];
3349 struct interim_data_params int_data = {
3350 (const char * const *)test_buffers,
3353 &ts_params->def_comp_xform,
3354 &ts_params->def_decomp_xform,
3358 struct test_data_params test_data = {
3359 .compress_state = RTE_COMP_OP_STATELESS,
3360 .decompress_state = RTE_COMP_OP_STATELESS,
3361 .buff_type = LB_BOTH,
3362 .zlib_dir = ZLIB_NONE,
3365 .overflow = OVERFLOW_DISABLED,
3366 .ratio = RATIO_DISABLED
3369 ts_params->def_comp_xform->compress.deflate.huffman =
3370 RTE_COMP_HUFFMAN_DYNAMIC;
3372 /* fill the buffer with data based on rand. data */
3373 srand(IM_BUF_DATA_TEST_SIZE_LB);
3374 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3375 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3377 /* Compress with compressdev, decompress with compressdev */
3378 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3384 ts_params->def_comp_xform->compress.deflate.huffman =
3385 RTE_COMP_HUFFMAN_DEFAULT;
3386 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, linear buffers, 2 operations with the
 * large random buffer SECOND (op 1) after a small standard test string.
 * Device-only round trip (ZLIB_NONE), dynamic Huffman.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3391 test_compressdev_deflate_im_buffers_LB_2ops_second(void)
3393 struct comp_testsuite_params *ts_params = &testsuite_params;
3395 int ret = TEST_SUCCESS;
3397 const struct rte_compressdev_capabilities *capab;
3398 char *test_buffer = NULL;
3399 const char *test_buffers[2];
3401 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3402 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3404 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3407 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3410 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3411 if (test_buffer == NULL) {
3413 "Can't allocate buffer for 'im buffer' test\n");
/* Small known string in slot 0, large random buffer in slot 1. */
3417 test_buffers[0] = compress_test_bufs[0];
3418 test_buffers[1] = test_buffer;
3420 struct interim_data_params int_data = {
3421 (const char * const *)test_buffers,
3424 &ts_params->def_comp_xform,
3425 &ts_params->def_decomp_xform,
3429 struct test_data_params test_data = {
3430 .compress_state = RTE_COMP_OP_STATELESS,
3431 .decompress_state = RTE_COMP_OP_STATELESS,
3432 .buff_type = LB_BOTH,
3433 .zlib_dir = ZLIB_NONE,
3436 .overflow = OVERFLOW_DISABLED,
3437 .ratio = RATIO_DISABLED
3440 ts_params->def_comp_xform->compress.deflate.huffman =
3441 RTE_COMP_HUFFMAN_DYNAMIC;
3443 /* fill the buffer with data based on rand. data */
3444 srand(IM_BUF_DATA_TEST_SIZE_LB);
3445 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3446 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3448 /* Compress with compressdev, decompress with compressdev */
3449 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3455 ts_params->def_comp_xform->compress.deflate.huffman =
3456 RTE_COMP_HUFFMAN_DEFAULT;
3457 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, linear buffers, 3 operations: the large
 * random buffer sits between two small standard test strings.  Device-only
 * round trip (ZLIB_NONE), dynamic Huffman.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3462 test_compressdev_deflate_im_buffers_LB_3ops(void)
3464 struct comp_testsuite_params *ts_params = &testsuite_params;
3466 int ret = TEST_SUCCESS;
3468 const struct rte_compressdev_capabilities *capab;
3469 char *test_buffer = NULL;
3470 const char *test_buffers[3];
3472 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3473 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3475 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3478 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3481 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3482 if (test_buffer == NULL) {
3484 "Can't allocate buffer for 'im buffer' test\n");
/* Large random buffer sandwiched between two small known strings. */
3488 test_buffers[0] = compress_test_bufs[0];
3489 test_buffers[1] = test_buffer;
3490 test_buffers[2] = compress_test_bufs[1];
3492 struct interim_data_params int_data = {
3493 (const char * const *)test_buffers,
3496 &ts_params->def_comp_xform,
3497 &ts_params->def_decomp_xform,
3501 struct test_data_params test_data = {
3502 .compress_state = RTE_COMP_OP_STATELESS,
3503 .decompress_state = RTE_COMP_OP_STATELESS,
3504 .buff_type = LB_BOTH,
3505 .zlib_dir = ZLIB_NONE,
3508 .overflow = OVERFLOW_DISABLED,
3509 .ratio = RATIO_DISABLED
3512 ts_params->def_comp_xform->compress.deflate.huffman =
3513 RTE_COMP_HUFFMAN_DYNAMIC;
3515 /* fill the buffer with data based on rand. data */
3516 srand(IM_BUF_DATA_TEST_SIZE_LB);
3517 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3518 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3520 /* Compress with compressdev, decompress with compressdev */
3521 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3527 ts_params->def_comp_xform->compress.deflate.huffman =
3528 RTE_COMP_HUFFMAN_DEFAULT;
3529 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, linear buffers, 4 operations: the large
 * random buffer is enqueued twice (slots 1 and 3), interleaved with two
 * small standard strings.  Device-only round trip (ZLIB_NONE), dynamic
 * Huffman.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3534 test_compressdev_deflate_im_buffers_LB_4ops(void)
3536 struct comp_testsuite_params *ts_params = &testsuite_params;
3538 int ret = TEST_SUCCESS;
3540 const struct rte_compressdev_capabilities *capab;
3541 char *test_buffer = NULL;
3542 const char *test_buffers[4];
3544 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3545 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3547 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3550 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3553 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3554 if (test_buffer == NULL) {
3556 "Can't allocate buffer for 'im buffer' test\n");
/* Same large random buffer used for ops 1 and 3. */
3560 test_buffers[0] = compress_test_bufs[0];
3561 test_buffers[1] = test_buffer;
3562 test_buffers[2] = compress_test_bufs[1];
3563 test_buffers[3] = test_buffer;
3565 struct interim_data_params int_data = {
3566 (const char * const *)test_buffers,
3569 &ts_params->def_comp_xform,
3570 &ts_params->def_decomp_xform,
3574 struct test_data_params test_data = {
3575 .compress_state = RTE_COMP_OP_STATELESS,
3576 .decompress_state = RTE_COMP_OP_STATELESS,
3577 .buff_type = LB_BOTH,
3578 .zlib_dir = ZLIB_NONE,
3581 .overflow = OVERFLOW_DISABLED,
3582 .ratio = RATIO_DISABLED
3585 ts_params->def_comp_xform->compress.deflate.huffman =
3586 RTE_COMP_HUFFMAN_DYNAMIC;
3588 /* fill the buffer with data based on rand. data */
3589 srand(IM_BUF_DATA_TEST_SIZE_LB);
3590 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3591 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3593 /* Compress with compressdev, decompress with compressdev */
3594 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3600 ts_params->def_comp_xform->compress.deflate.huffman =
3601 RTE_COMP_HUFFMAN_DEFAULT;
3602 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, scatter-gather buffers, 1 operation:
 * IM_BUF_DATA_TEST_SIZE_SGL bytes (three max-size mbufs' worth) of
 * reproducible pseudo-random data, device-only round trip (ZLIB_NONE),
 * dynamic Huffman, SGL on both sides.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3608 test_compressdev_deflate_im_buffers_SGL_1op(void)
3610 struct comp_testsuite_params *ts_params = &testsuite_params;
3612 int ret = TEST_SUCCESS;
3614 const struct rte_compressdev_capabilities *capab;
3615 char *test_buffer = NULL;
3617 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3618 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3620 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3623 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3626 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3627 if (test_buffer == NULL) {
3629 "Can't allocate buffer for big-data\n");
3633 struct interim_data_params int_data = {
3634 (const char * const *)&test_buffer,
3637 &ts_params->def_comp_xform,
3638 &ts_params->def_decomp_xform,
3642 struct test_data_params test_data = {
3643 .compress_state = RTE_COMP_OP_STATELESS,
3644 .decompress_state = RTE_COMP_OP_STATELESS,
3645 .buff_type = SGL_BOTH,
3646 .zlib_dir = ZLIB_NONE,
3649 .overflow = OVERFLOW_DISABLED,
3650 .ratio = RATIO_DISABLED
3653 ts_params->def_comp_xform->compress.deflate.huffman =
3654 RTE_COMP_HUFFMAN_DYNAMIC;
3656 /* fill the buffer with data based on rand. data */
3657 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3658 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3659 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3661 /* Compress with compressdev, decompress with compressdev */
3662 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3668 ts_params->def_comp_xform->compress.deflate.huffman =
3669 RTE_COMP_HUFFMAN_DEFAULT;
3670 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, scatter-gather buffers, 2 operations with
 * the large random buffer FIRST (op 0) and a small standard test string
 * second.  Device-only round trip (ZLIB_NONE), dynamic Huffman.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3675 test_compressdev_deflate_im_buffers_SGL_2ops_first(void)
3677 struct comp_testsuite_params *ts_params = &testsuite_params;
3679 int ret = TEST_SUCCESS;
3681 const struct rte_compressdev_capabilities *capab;
3682 char *test_buffer = NULL;
3683 const char *test_buffers[2];
3685 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3686 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3688 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3691 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3694 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3695 if (test_buffer == NULL) {
3697 "Can't allocate buffer for big-data\n");
/* Large random buffer in slot 0, small known string in slot 1. */
3701 test_buffers[0] = test_buffer;
3702 test_buffers[1] = compress_test_bufs[0];
3704 struct interim_data_params int_data = {
3705 (const char * const *)test_buffers,
3708 &ts_params->def_comp_xform,
3709 &ts_params->def_decomp_xform,
3713 struct test_data_params test_data = {
3714 .compress_state = RTE_COMP_OP_STATELESS,
3715 .decompress_state = RTE_COMP_OP_STATELESS,
3716 .buff_type = SGL_BOTH,
3717 .zlib_dir = ZLIB_NONE,
3720 .overflow = OVERFLOW_DISABLED,
3721 .ratio = RATIO_DISABLED
3724 ts_params->def_comp_xform->compress.deflate.huffman =
3725 RTE_COMP_HUFFMAN_DYNAMIC;
3727 /* fill the buffer with data based on rand. data */
3728 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3729 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3730 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3732 /* Compress with compressdev, decompress with compressdev */
3733 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3739 ts_params->def_comp_xform->compress.deflate.huffman =
3740 RTE_COMP_HUFFMAN_DEFAULT;
3741 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, scatter-gather buffers, 2 operations with
 * the large random buffer SECOND (op 1) after a small standard test
 * string.  Device-only round trip (ZLIB_NONE), dynamic Huffman.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3746 test_compressdev_deflate_im_buffers_SGL_2ops_second(void)
3748 struct comp_testsuite_params *ts_params = &testsuite_params;
3750 int ret = TEST_SUCCESS;
3752 const struct rte_compressdev_capabilities *capab;
3753 char *test_buffer = NULL;
3754 const char *test_buffers[2];
3756 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3757 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3759 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3762 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3765 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3766 if (test_buffer == NULL) {
3768 "Can't allocate buffer for big-data\n");
/* Small known string in slot 0, large random buffer in slot 1. */
3772 test_buffers[0] = compress_test_bufs[0];
3773 test_buffers[1] = test_buffer;
3775 struct interim_data_params int_data = {
3776 (const char * const *)test_buffers,
3779 &ts_params->def_comp_xform,
3780 &ts_params->def_decomp_xform,
3784 struct test_data_params test_data = {
3785 .compress_state = RTE_COMP_OP_STATELESS,
3786 .decompress_state = RTE_COMP_OP_STATELESS,
3787 .buff_type = SGL_BOTH,
3788 .zlib_dir = ZLIB_NONE,
3791 .overflow = OVERFLOW_DISABLED,
3792 .ratio = RATIO_DISABLED
3795 ts_params->def_comp_xform->compress.deflate.huffman =
3796 RTE_COMP_HUFFMAN_DYNAMIC;
3798 /* fill the buffer with data based on rand. data */
3799 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3800 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3801 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3803 /* Compress with compressdev, decompress with compressdev */
3804 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3810 ts_params->def_comp_xform->compress.deflate.huffman =
3811 RTE_COMP_HUFFMAN_DEFAULT;
3812 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, scatter-gather buffers, 3 operations: the
 * large random buffer sits between two small standard test strings.
 * Device-only round trip (ZLIB_NONE), dynamic Huffman.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3817 test_compressdev_deflate_im_buffers_SGL_3ops(void)
3819 struct comp_testsuite_params *ts_params = &testsuite_params;
3821 int ret = TEST_SUCCESS;
3823 const struct rte_compressdev_capabilities *capab;
3824 char *test_buffer = NULL;
3825 const char *test_buffers[3];
3827 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3828 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3830 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3833 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3836 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3837 if (test_buffer == NULL) {
3839 "Can't allocate buffer for big-data\n");
/* Large random buffer sandwiched between two small known strings. */
3843 test_buffers[0] = compress_test_bufs[0];
3844 test_buffers[1] = test_buffer;
3845 test_buffers[2] = compress_test_bufs[1];
3847 struct interim_data_params int_data = {
3848 (const char * const *)test_buffers,
3851 &ts_params->def_comp_xform,
3852 &ts_params->def_decomp_xform,
3856 struct test_data_params test_data = {
3857 .compress_state = RTE_COMP_OP_STATELESS,
3858 .decompress_state = RTE_COMP_OP_STATELESS,
3859 .buff_type = SGL_BOTH,
3860 .zlib_dir = ZLIB_NONE,
3863 .overflow = OVERFLOW_DISABLED,
3864 .ratio = RATIO_DISABLED
3867 ts_params->def_comp_xform->compress.deflate.huffman =
3868 RTE_COMP_HUFFMAN_DYNAMIC;
3870 /* fill the buffer with data based on rand. data */
3871 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3872 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3873 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3875 /* Compress with compressdev, decompress with compressdev */
3876 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3882 ts_params->def_comp_xform->compress.deflate.huffman =
3883 RTE_COMP_HUFFMAN_DEFAULT;
3884 rte_free(test_buffer);
/*
 * QAT intermediate-buffer test, scatter-gather buffers, 4 operations: the
 * large random buffer is enqueued twice (slots 1 and 3), interleaved with
 * two small standard strings.  Device-only round trip (ZLIB_NONE),
 * dynamic Huffman.
 * NOTE(review): extract has gaps (skip paths, int_data fields, error
 * handling, final return missing); comments only, code untouched.
 */
3890 test_compressdev_deflate_im_buffers_SGL_4ops(void)
3892 struct comp_testsuite_params *ts_params = &testsuite_params;
3894 int ret = TEST_SUCCESS;
3896 const struct rte_compressdev_capabilities *capab;
3897 char *test_buffer = NULL;
3898 const char *test_buffers[4];
3900 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3901 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3903 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3906 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3909 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3910 if (test_buffer == NULL) {
3912 "Can't allocate buffer for big-data\n");
/* Same large random buffer used for ops 1 and 3. */
3916 test_buffers[0] = compress_test_bufs[0];
3917 test_buffers[1] = test_buffer;
3918 test_buffers[2] = compress_test_bufs[1];
3919 test_buffers[3] = test_buffer;
3921 struct interim_data_params int_data = {
3922 (const char * const *)test_buffers,
3925 &ts_params->def_comp_xform,
3926 &ts_params->def_decomp_xform,
3930 struct test_data_params test_data = {
3931 .compress_state = RTE_COMP_OP_STATELESS,
3932 .decompress_state = RTE_COMP_OP_STATELESS,
3933 .buff_type = SGL_BOTH,
3934 .zlib_dir = ZLIB_NONE,
3937 .overflow = OVERFLOW_DISABLED,
3938 .ratio = RATIO_DISABLED
3941 ts_params->def_comp_xform->compress.deflate.huffman =
3942 RTE_COMP_HUFFMAN_DYNAMIC;
3944 /* fill the buffer with data based on rand. data */
3945 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3946 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3947 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3949 /* Compress with compressdev, decompress with compressdev */
3950 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning. */
3956 ts_params->def_comp_xform->compress.deflate.huffman =
3957 RTE_COMP_HUFFMAN_DEFAULT;
3958 rte_free(test_buffer);
/*
 * Negative IM-buffer test: one stateless DEFLATE op whose SGL data size
 * (IM_BUF_DATA_TEST_SIZE_OVER) needs more descriptors than the queue
 * provides (NUM_MAX_INFLIGHT_OPS = 128), so the op is expected to fail.
 * NOTE(review): return-type line, braces, declaration of 'j' and the
 * skip/failure paths are stripped from this extract.
 */
3963 test_compressdev_deflate_im_buffers_SGL_over_1op(void)
3965 struct comp_testsuite_params *ts_params = &testsuite_params;
3967 int ret = TEST_SUCCESS;
3969 const struct rte_compressdev_capabilities *capab;
3970 char *test_buffer = NULL;
/* Warn up-front: driver error logs during this test are expected. */
3972 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
3974 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3975 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* Skip unless the PMD supports dynamic Huffman and out-of-place SGL->SGL. */
3977 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3980 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
/* Oversized source buffer: exceeds the queue's descriptor capacity. */
3983 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0);
3984 if (test_buffer == NULL) {
3986 "Can't allocate buffer for big-data\n");
/* Single op: pass the buffer pointer's address as a 1-element array. */
3990 struct interim_data_params int_data = {
3991 (const char * const *)&test_buffer,
3994 &ts_params->def_comp_xform,
3995 &ts_params->def_decomp_xform,
3999 struct test_data_params test_data = {
4000 .compress_state = RTE_COMP_OP_STATELESS,
4001 .decompress_state = RTE_COMP_OP_STATELESS,
4002 .buff_type = SGL_BOTH,
4003 .zlib_dir = ZLIB_NONE,
4006 .overflow = OVERFLOW_DISABLED,
4007 .ratio = RATIO_DISABLED
/* Dynamic Huffman forces the PMD down the IM-buffer path. */
4010 ts_params->def_comp_xform->compress.deflate.huffman =
4011 RTE_COMP_HUFFMAN_DYNAMIC;
4013 /* fill the buffer with data based on rand. data */
/* Reproducible pseudo-random fill; every byte forced non-zero via '| 1'. */
4014 srand(IM_BUF_DATA_TEST_SIZE_OVER);
4015 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j)
4016 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
4018 /* Compress with compressdev, decompress with compressdev */
4019 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning, then free. */
4025 ts_params->def_comp_xform->compress.deflate.huffman =
4026 RTE_COMP_HUFFMAN_DEFAULT;
4027 rte_free(test_buffer);
/*
 * Negative IM-buffer test, 2 ops: the FIRST op carries the oversized
 * buffer (more descriptors required than the queue's 128), the second
 * is a small static vector.  Per the suite's table comment, both ops
 * are expected to be rejected.
 * NOTE(review): return-type line, braces, declaration of 'j' and the
 * skip/failure paths are stripped from this extract.
 */
4034 test_compressdev_deflate_im_buffers_SGL_over_2ops_first(void)
4036 struct comp_testsuite_params *ts_params = &testsuite_params;
4038 int ret = TEST_SUCCESS;
4040 const struct rte_compressdev_capabilities *capab;
4041 char *test_buffer = NULL;
4042 const char *test_buffers[2];
/* Warn up-front: driver error logs during this test are expected. */
4044 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
4046 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
4047 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* Skip unless the PMD supports dynamic Huffman and out-of-place SGL->SGL. */
4049 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
4052 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
4055 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0);
4056 if (test_buffer == NULL) {
4058 "Can't allocate buffer for big-data\n");
/* Op 0 = oversized buffer, op 1 = small static vector. */
4062 test_buffers[0] = test_buffer;
4063 test_buffers[1] = compress_test_bufs[0];
4065 struct interim_data_params int_data = {
4066 (const char * const *)test_buffers,
4069 &ts_params->def_comp_xform,
4070 &ts_params->def_decomp_xform,
4074 struct test_data_params test_data = {
4075 .compress_state = RTE_COMP_OP_STATELESS,
4076 .decompress_state = RTE_COMP_OP_STATELESS,
4077 .buff_type = SGL_BOTH,
4078 .zlib_dir = ZLIB_NONE,
4081 .overflow = OVERFLOW_DISABLED,
4082 .ratio = RATIO_DISABLED
/* Dynamic Huffman forces the PMD down the IM-buffer path. */
4085 ts_params->def_comp_xform->compress.deflate.huffman =
4086 RTE_COMP_HUFFMAN_DYNAMIC;
4088 /* fill the buffer with data based on rand. data */
/* Reproducible pseudo-random fill; every byte forced non-zero via '| 1'. */
4089 srand(IM_BUF_DATA_TEST_SIZE_OVER);
4090 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j)
4091 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
4093 /* Compress with compressdev, decompress with compressdev */
4094 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning, then free. */
4100 ts_params->def_comp_xform->compress.deflate.huffman =
4101 RTE_COMP_HUFFMAN_DEFAULT;
4102 rte_free(test_buffer);
/*
 * Negative IM-buffer test, 2 ops: mirror of the "_first" variant — here
 * the SECOND op carries the oversized buffer and the first is a small
 * static vector, exercising rejection when the overflow op is not at
 * the head of the burst.
 * NOTE(review): return-type line, braces, declaration of 'j' and the
 * skip/failure paths are stripped from this extract.
 */
4107 test_compressdev_deflate_im_buffers_SGL_over_2ops_second(void)
4109 struct comp_testsuite_params *ts_params = &testsuite_params;
4111 int ret = TEST_SUCCESS;
4113 const struct rte_compressdev_capabilities *capab;
4114 char *test_buffer = NULL;
4115 const char *test_buffers[2];
/* Warn up-front: driver error logs during this test are expected. */
4117 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
4119 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
4120 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* Skip unless the PMD supports dynamic Huffman and out-of-place SGL->SGL. */
4122 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
4125 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
4128 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0);
4129 if (test_buffer == NULL) {
4131 "Can't allocate buffer for big-data\n");
/* Op 0 = small static vector, op 1 = oversized buffer. */
4135 test_buffers[0] = compress_test_bufs[0];
4136 test_buffers[1] = test_buffer;
4138 struct interim_data_params int_data = {
4139 (const char * const *)test_buffers,
4142 &ts_params->def_comp_xform,
4143 &ts_params->def_decomp_xform,
4147 struct test_data_params test_data = {
4148 .compress_state = RTE_COMP_OP_STATELESS,
4149 .decompress_state = RTE_COMP_OP_STATELESS,
4150 .buff_type = SGL_BOTH,
4151 .zlib_dir = ZLIB_NONE,
4154 .overflow = OVERFLOW_DISABLED,
4155 .ratio = RATIO_DISABLED
/* Dynamic Huffman forces the PMD down the IM-buffer path. */
4158 ts_params->def_comp_xform->compress.deflate.huffman =
4159 RTE_COMP_HUFFMAN_DYNAMIC;
4161 /* fill the buffer with data based on rand. data */
/* Reproducible pseudo-random fill; every byte forced non-zero via '| 1'. */
4162 srand(IM_BUF_DATA_TEST_SIZE_OVER);
4163 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j)
4164 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
4166 /* Compress with compressdev, decompress with compressdev */
4167 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/* Restore the shared default xform before returning, then free. */
4173 ts_params->def_comp_xform->compress.deflate.huffman =
4174 RTE_COMP_HUFFMAN_DEFAULT;
4175 rte_free(test_buffer);
/*
 * Master test table for the compressdev unit test suite.  Cases run in
 * listing order: basic stateless/stateful DEFLATE coverage first, then
 * positive IM-buffer cases (linear-buffer "LB" and scatter-gather "SGL"),
 * then negative IM-buffer cases that deliberately overrun queue capacity.
 * NOTE(review): some physical lines (closing braces and a few `*/`
 * terminators) are stripped from this extract.
 */
4179 static struct unit_test_suite compressdev_testsuite = {
4180 .suite_name = "compressdev unit test suite",
4181 .setup = testsuite_setup,
4182 .teardown = testsuite_teardown,
4183 .unit_test_cases = {
/* Runs with no per-case setup: validates rejection of bad configs. */
4184 TEST_CASE_ST(NULL, NULL,
4185 test_compressdev_invalid_configuration),
4186 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4187 test_compressdev_deflate_stateless_fixed),
4188 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4189 test_compressdev_deflate_stateless_dynamic),
4190 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4191 test_compressdev_deflate_stateless_dynamic_big),
4192 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4193 test_compressdev_deflate_stateless_multi_op),
4194 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4195 test_compressdev_deflate_stateless_multi_level),
4196 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4197 test_compressdev_deflate_stateless_multi_xform),
4198 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4199 test_compressdev_deflate_stateless_sgl),
4200 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4201 test_compressdev_deflate_stateless_checksum),
4202 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4203 test_compressdev_out_of_space_buffer),
4204 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4205 test_compressdev_deflate_stateful_decomp),
4206 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4207 test_compressdev_deflate_stateful_decomp_checksum),
4208 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4209 test_compressdev_external_mbufs),
4210 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4211 test_compressdev_deflate_stateless_fixed_oos_recoverable),
4213 /* Positive test cases for IM buffer handling verification */
4214 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4215 test_compressdev_deflate_im_buffers_LB_1op),
4216 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4217 test_compressdev_deflate_im_buffers_LB_2ops_first),
4218 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4219 test_compressdev_deflate_im_buffers_LB_2ops_second),
4220 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4221 test_compressdev_deflate_im_buffers_LB_3ops),
4223 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4224 test_compressdev_deflate_im_buffers_LB_4ops),
4225 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4226 test_compressdev_deflate_im_buffers_SGL_1op),
4228 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4229 test_compressdev_deflate_im_buffers_SGL_2ops_first),
4230 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4231 test_compressdev_deflate_im_buffers_SGL_2ops_second),
4232 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4233 test_compressdev_deflate_im_buffers_SGL_3ops),
4234 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4235 test_compressdev_deflate_im_buffers_SGL_4ops),
4237 /* Negative test cases for IM buffer handling verification */
4239 /* For this test huge mempool is necessary.
4240 * It tests one case:
4241 * only one op containing big amount of data, so that
4242 * number of requested descriptors higher than number
4243 * of available descriptors (128)
4245 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4246 test_compressdev_deflate_im_buffers_SGL_over_1op),
4248 /* For this test huge mempool is necessary.
4249 * 2 ops. First op contains big amount of data:
4250 * number of requested descriptors higher than number
4251 * of available descriptors (128), the second op is
4252 * relatively small. In this case both ops are rejected
4254 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4255 test_compressdev_deflate_im_buffers_SGL_over_2ops_first),
4257 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
4258 test_compressdev_deflate_im_buffers_SGL_over_2ops_second),
4260 TEST_CASES_END() /**< NULL terminate unit test array */
/*
 * Suite entry point: hand the table above to the generic runner and
 * register it under the "compressdev_autotest" test command.
 * NOTE(review): the return-type line and braces are stripped from this
 * extract.
 */
4265 test_compressdev(void)
4267 return unit_test_suite_runner(&compressdev_testsuite);
4270 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);