1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
11 #include <rte_cycles.h>
12 #include <rte_malloc.h>
13 #include <rte_mempool.h>
15 #include <rte_compressdev.h>
16 #include <rte_string_fns.h>
18 #include "test_compressdev_test_buffer.h"
/* Integer ceiling division; valid for non-negative a and positive b. */
21 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
/* Deflate defaults: 2^15-byte window, zlib memory level 8. */
23 #define DEFAULT_WINDOW_SIZE 15
24 #define DEFAULT_MEM_LEVEL 8
/* Dequeue polling policy: retry up to 10 times with a 10000 us sleep
 * between polls (see test_run_enqueue_dequeue).
 */
25 #define MAX_DEQD_RETRIES 10
26 #define DEQUEUE_WAIT_TIME 10000
29 * 30% extra size for compressed data compared to original data,
30 * in case data size cannot be reduced and it is actually bigger
31 * due to the compress block headers
33 #define COMPRESS_BUF_SIZE_RATIO 1.3
/* Ratio 1.0 disables the extra headroom; 0.2 is deliberately too small,
 * presumably to force the overflow/out-of-space path — see
 * test_mbufs_calculate_data_size().
 */
34 #define COMPRESS_BUF_SIZE_RATIO_DISABLED 1.0
35 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2
36 #define NUM_LARGE_MBUFS 16
37 #define SMALL_SEG_SIZE 256
40 #define NUM_MAX_XFORMS 16
41 #define NUM_MAX_INFLIGHT_OPS 128
/* zlib framing constants: 31 is the windowBits value selecting a CRC
 * checksummed (gzip-style) stream; the header/trailer sizes below are
 * stripped from zlib output in compress_zlib() so only raw deflate data
 * remains.
 */
44 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
45 #define ZLIB_HEADER_SIZE 2
46 #define ZLIB_TRAILER_SIZE 4
47 #define GZIP_HEADER_SIZE 10
48 #define GZIP_TRAILER_SIZE 8
/* Tiny destination size used to trigger out-of-space handling. */
50 #define OUT_OF_SPACE_BUF 1
52 #define MAX_MBUF_SEGMENT_SIZE 65535
53 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
54 #define NUM_BIG_MBUFS (512 + 1)
55 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * 2)
57 /* constants for "im buffer" tests start here */
59 /* number of mbufs lower than number of inflight ops */
60 #define IM_BUF_NUM_MBUFS 3
61 /* above threshold (QAT_FALLBACK_THLD) and below max mbuf size */
62 #define IM_BUF_DATA_TEST_SIZE_LB 59600
63 /* data size smaller than the queue capacity */
64 #define IM_BUF_DATA_TEST_SIZE_SGL (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS)
65 /* number of mbufs bigger than number of inflight ops */
66 #define IM_BUF_NUM_MBUFS_OVER (NUM_MAX_INFLIGHT_OPS + 1)
67 /* data size bigger than the queue capacity */
68 #define IM_BUF_DATA_TEST_SIZE_OVER (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_OVER)
69 /* number of mid-size mbufs */
70 #define IM_BUF_NUM_MBUFS_MID ((NUM_MAX_INFLIGHT_OPS / 3) + 1)
71 /* capacity of mid-size mbufs */
72 #define IM_BUF_DATA_TEST_SIZE_MID (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_MID)
/* Human-readable names for each rte_comp_huffman value, indexed by the
 * enum (designated initializers), used when reporting test results.
 */
76 huffman_type_strings[] = {
77 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
78 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
79 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Buffer layout variants: linear buffer (LB) vs chained/SGL mbufs on
 * either side of the operation.
 */
90 LB_BOTH = 0, /* both input and output are linear*/
91 SGL_BOTH, /* both input and output are chained */
92 SGL_TO_LB, /* input buffer is chained */
93 LB_TO_SGL /* output buffer is chained */
/* Direction selector used by the shared setup/run helpers. */
106 enum operation_type {
107 OPERATION_COMPRESSION,
108 OPERATION_DECOMPRESSION
/* Private area placed after each rte_comp_op; stores the op's original
 * index (orig_idx) because dequeue order is not guaranteed — see
 * test_deflate_comp_run().
 */
111 struct priv_op_data {
/* Suite-wide resources: three mbuf pools of different segment sizes
 * (linear, SGL and big-data tests), the comp-op pool, and the default
 * deflate compress/decompress transforms.
 */
115 struct comp_testsuite_params {
116 struct rte_mempool *large_mbuf_pool;
117 struct rte_mempool *small_mbuf_pool;
118 struct rte_mempool *big_mbuf_pool;
119 struct rte_mempool *op_pool;
120 struct rte_comp_xform *def_comp_xform;
121 struct rte_comp_xform *def_decomp_xform;
/* Shared inputs for one test case: the text buffers to (de)compress and
 * the transform arrays cycled over those buffers (i % num_xforms).
 */
124 struct interim_data_params {
125 const char * const *test_bufs;
126 unsigned int num_bufs;
128 struct rte_comp_xform **compress_xforms;
129 struct rte_comp_xform **decompress_xforms;
130 unsigned int num_xforms;
/* Per-test configuration set by the test entry points: stateless vs
 * stateful ops, buffer layout (see varied_buff), which direction runs
 * through zlib instead of the compressdev PMD, plus special-mode
 * switches (out-of-space, big-data, external mbufs, overflow, ratio).
 */
133 struct test_data_params {
134 enum rte_comp_op_type compress_state;
135 enum rte_comp_op_type decompress_state;
136 enum varied_buff buff_type;
137 enum zlib_direction zlib_dir;
138 unsigned int out_of_space;
139 unsigned int big_data;
140 /* stateful decompression specific parameters */
141 unsigned int decompress_output_block_size;
142 unsigned int decompress_steps_max;
143 /* external mbufs specific parameters */
144 unsigned int use_external_mbufs;
145 unsigned int inbuf_data_size;
146 const struct rte_memzone *inbuf_memzone;
147 const struct rte_memzone *compbuf_memzone;
148 const struct rte_memzone *uncompbuf_memzone;
149 /* overflow test activation */
150 enum overflow_test overflow;
151 enum ratio_switch ratio;
/* Aggregates all per-buffer working arrays of a test run (sources,
 * destinations, ops before/after processing, checksums and sizes) so
 * the setup/run/verify helpers can share them through one pointer.
 */
154 struct test_private_arrays {
155 struct rte_mbuf **uncomp_bufs;
156 struct rte_mbuf **comp_bufs;
157 struct rte_comp_op **ops;
158 struct rte_comp_op **ops_processed;
160 uint64_t *compress_checksum;
161 uint32_t *compressed_data_size;
163 char **all_decomp_data;
164 unsigned int *decomp_produced_data_size;
165 uint16_t num_priv_xforms;
/* Singleton suite state, zero-initialized; filled in by testsuite_setup(). */
168 static struct comp_testsuite_params testsuite_params = { 0 };
/* Suite teardown: warn about any mbufs/ops still outstanding (a leak in
 * some test), then release the pools and default transforms.
 * rte_mempool_free() and rte_free() tolerate NULL, so partial setup is safe.
 */
172 testsuite_teardown(void)
174 struct comp_testsuite_params *ts_params = &testsuite_params;
176 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
177 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
178 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
179 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
180 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
181 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
182 if (rte_mempool_in_use_count(ts_params->op_pool))
183 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n")
185 rte_mempool_free(ts_params->large_mbuf_pool);
186 rte_mempool_free(ts_params->small_mbuf_pool);
187 rte_mempool_free(ts_params->big_mbuf_pool);
188 rte_mempool_free(ts_params->op_pool);
189 rte_free(ts_params->def_comp_xform);
190 rte_free(ts_params->def_decomp_xform);
/* Suite setup: requires at least one compressdev. Sizes the large mbuf
 * pool from the biggest test vector scaled by COMPRESS_BUF_SIZE_RATIO
 * (same buffers serve as compressed and decompressed storage), creates
 * the small-segment and big-segment pools for SGL tests and the op pool
 * with room for priv_op_data, then fills in the default deflate
 * compress/decompress transforms. On any failure it falls through to
 * testsuite_teardown() to release whatever was created.
 */
194 testsuite_setup(void)
196 struct comp_testsuite_params *ts_params = &testsuite_params;
197 uint32_t max_buf_size = 0;
200 if (rte_compressdev_count() == 0) {
201 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
205 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
206 rte_compressdev_name_get(0));
/* Largest vector (incl. NUL terminator) drives the pool element size. */
208 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
209 max_buf_size = RTE_MAX(max_buf_size,
210 strlen(compress_test_bufs[i]) + 1);
213 * Buffers to be used in compression and decompression.
214 * Since decompressed data might be larger than
215 * compressed data (due to block header),
216 * buffers should be big enough for both cases.
218 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
219 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
222 max_buf_size + RTE_PKTMBUF_HEADROOM,
224 if (ts_params->large_mbuf_pool == NULL) {
225 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
229 /* Create mempool with smaller buffers for SGL testing */
230 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
231 NUM_LARGE_MBUFS * MAX_SEGS,
233 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
235 if (ts_params->small_mbuf_pool == NULL) {
236 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
240 /* Create mempool with big buffers for SGL testing */
241 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
244 MAX_MBUF_SEGMENT_SIZE,
246 if (ts_params->big_mbuf_pool == NULL) {
247 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* Op pool reserves sizeof(struct priv_op_data) of user data per op. */
251 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
252 0, sizeof(struct priv_op_data),
254 if (ts_params->op_pool == NULL) {
255 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
258 ts_params->def_comp_xform =
260 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
261 if (ts_params->def_comp_xform == NULL) {
263 "Default compress xform could not be created\n");
266 ts_params->def_decomp_xform =
267 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
268 if (ts_params->def_decomp_xform == NULL) {
270 "Default decompress xform could not be created\n");
274 /* Initializes default values for compress/decompress xforms */
275 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing ',' below is the comma operator, not a typo
 * that changes behavior — it acts like ';' here. */
276 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
277 ts_params->def_comp_xform->compress.deflate.huffman =
278 RTE_COMP_HUFFMAN_DEFAULT;
279 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
280 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
281 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
283 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
284 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
285 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
286 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: release whatever was allocated above. */
291 testsuite_teardown();
/* Per-test setup: configure device 0 with one queue pair sized for
 * NUM_MAX_INFLIGHT_OPS and NUM_MAX_XFORMS private xforms on the local
 * socket, then start it.
 */
297 generic_ut_setup(void)
299 /* Configure compressdev (one device, one queue pair) */
300 struct rte_compressdev_config config = {
301 .socket_id = rte_socket_id(),
303 .max_nb_priv_xforms = NUM_MAX_XFORMS,
307 if (rte_compressdev_configure(0, &config) < 0) {
308 RTE_LOG(ERR, USER1, "Device configuration failed\n");
312 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
313 rte_socket_id()) < 0) {
314 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
318 if (rte_compressdev_start(0) < 0) {
319 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop then close device 0; a close failure is only
 * logged since there is nothing further to recover.
 */
327 generic_ut_teardown(void)
329 rte_compressdev_stop(0);
330 if (rte_compressdev_close(0) < 0)
331 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/* Negative test: every configure/queue-setup call below is expected to
 * fail. Checks zero queue pairs, more queue pairs than the device
 * advertises (when it advertises a maximum), and queue pair setup on an
 * unconfigured device.
 */
335 test_compressdev_invalid_configuration(void)
337 struct rte_compressdev_config invalid_config;
338 struct rte_compressdev_config valid_config = {
339 .socket_id = rte_socket_id(),
341 .max_nb_priv_xforms = NUM_MAX_XFORMS,
344 struct rte_compressdev_info dev_info;
346 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
348 /* Invalid configuration with 0 queue pairs */
349 memcpy(&invalid_config, &valid_config,
350 sizeof(struct rte_compressdev_config));
351 invalid_config.nb_queue_pairs = 0;
353 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
354 "Device configuration was successful "
355 "with no queue pairs (invalid)\n");
358 * Invalid configuration with too many queue pairs
359 * (if there is an actual maximum number of queue pairs)
361 rte_compressdev_info_get(0, &dev_info);
/* max_nb_queue_pairs == 0 means "no limit", so skip that case. */
362 if (dev_info.max_nb_queue_pairs != 0) {
363 memcpy(&invalid_config, &valid_config,
364 sizeof(struct rte_compressdev_config));
365 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
367 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
368 "Device configuration was successful "
369 "with too many queue pairs (invalid)\n");
372 /* Invalid queue pair setup, with no number of queue pairs set */
373 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
374 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
375 "Queue pair setup was successful "
376 "with no queue pairs set (invalid)\n");
/* Compare two byte buffers; logs and reports failure when either the
 * lengths or the contents differ.
 */
382 compare_buffers(const char *buffer1, uint32_t buffer1_len,
383 const char *buffer2, uint32_t buffer2_len)
385 if (buffer1_len != buffer2_len) {
386 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
390 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
391 RTE_LOG(ERR, USER1, "Buffers are different\n");
399 * Maps compressdev and Zlib flush flags
/* Translates rte_comp_flush_flag into the equivalent zlib flush value
 * passed to deflate()/inflate(). Every enumerator is covered, so the
 * fall-through default should be unreachable.
 */
402 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
405 case RTE_COMP_FLUSH_NONE:
407 case RTE_COMP_FLUSH_SYNC:
409 case RTE_COMP_FLUSH_FULL:
411 case RTE_COMP_FLUSH_FINAL:
414 * There should be only the values above,
415 * so this should never happen
/* Software reference compression of one op using zlib's deflate.
 * Mirrors what a compressdev PMD would do for a stateless op: maps the
 * xform's huffman/checksum/level/window settings onto deflateInit2(),
 * flattens chained (SGL) source/destination mbufs through temporary
 * linear buffers, compresses everything in one deflate() call, then
 * fills in op->consumed / op->produced / op->status / op->output_chksum.
 * For checksummed modes the zlib/gzip header and trailer are stripped so
 * the output matches raw-deflate PMD output.
 */
423 compress_zlib(struct rte_comp_op *op,
424 const struct rte_comp_xform *xform, int mem_level)
428 int strategy, window_bits, comp_level;
429 int ret = TEST_FAILED;
430 uint8_t *single_src_buf = NULL;
431 uint8_t *single_dst_buf = NULL;
433 /* initialize zlib stream */
434 stream.zalloc = Z_NULL;
435 stream.zfree = Z_NULL;
436 stream.opaque = Z_NULL;
438 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
441 strategy = Z_DEFAULT_STRATEGY;
444 * Window bits is the base two logarithm of the window size (in bytes).
445 * When doing raw DEFLATE, this number will be negative.
447 window_bits = -(xform->compress.window_size);
/* Positive/gzip windowBits select a checksummed stream when requested. */
448 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
450 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
451 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
453 comp_level = xform->compress.level;
455 if (comp_level != RTE_COMP_LEVEL_NONE)
456 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
457 window_bits, mem_level, strategy);
459 ret = deflateInit(&stream, Z_NO_COMPRESSION);
462 printf("Zlib deflate could not be initialized\n");
466 /* Assuming stateless operation */
/* Chained source: flatten the whole packet into one linear buffer. */
468 if (op->m_src->nb_segs > 1) {
469 single_src_buf = rte_malloc(NULL,
470 rte_pktmbuf_pkt_len(op->m_src), 0);
471 if (single_src_buf == NULL) {
472 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
476 if (rte_pktmbuf_read(op->m_src, op->src.offset,
477 rte_pktmbuf_pkt_len(op->m_src) -
479 single_src_buf) == NULL) {
481 "Buffer could not be read entirely\n");
485 stream.avail_in = op->src.length;
486 stream.next_in = single_src_buf;
489 stream.avail_in = op->src.length;
490 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: deflate into a scratch buffer, scatter later. */
494 if (op->m_dst->nb_segs > 1) {
496 single_dst_buf = rte_malloc(NULL,
497 rte_pktmbuf_pkt_len(op->m_dst), 0);
498 if (single_dst_buf == NULL) {
500 "Buffer could not be allocated\n");
504 stream.avail_out = op->m_dst->pkt_len;
505 stream.next_out = single_dst_buf;
507 } else {/* linear output */
508 stream.avail_out = op->m_dst->data_len;
509 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
513 /* Stateless operation, all buffer will be compressed in one go */
514 zlib_flush = map_zlib_flush_flag(op->flush_flag);
515 ret = deflate(&stream, zlib_flush);
517 if (stream.avail_in != 0) {
518 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
522 if (ret != Z_STREAM_END)
525 /* Copy data to destination SGL */
526 if (op->m_dst->nb_segs > 1) {
527 uint32_t remaining_data = stream.total_out;
528 uint8_t *src_data = single_dst_buf;
529 struct rte_mbuf *dst_buf = op->m_dst;
/* Walk the destination chain, filling each segment in turn. */
531 while (remaining_data > 0) {
532 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
533 uint8_t *, op->dst.offset);
535 if (remaining_data < dst_buf->data_len) {
536 memcpy(dst_data, src_data, remaining_data);
539 memcpy(dst_data, src_data, dst_buf->data_len);
540 remaining_data -= dst_buf->data_len;
541 src_data += dst_buf->data_len;
542 dst_buf = dst_buf->next;
547 op->consumed = stream.total_in;
/* Strip zlib/gzip framing so produced counts raw deflate bytes only. */
548 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
549 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
550 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
551 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
553 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
554 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
555 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
556 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
559 op->produced = stream.total_out;
561 op->status = RTE_COMP_OP_STATUS_SUCCESS;
562 op->output_chksum = stream.adler;
564 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so unconditional cleanup is safe. */
569 rte_free(single_src_buf);
570 rte_free(single_dst_buf);
/* Software reference decompression of one op using zlib's inflate.
 * Counterpart of compress_zlib(): configures inflateInit2() with a
 * negative windowBits (raw deflate), flattens chained (SGL) source and
 * destination mbufs through temporary linear buffers, inflates the
 * whole input in one call, scatters the output back into the
 * destination chain, and fills in op->consumed / op->produced /
 * op->status.
 */
576 decompress_zlib(struct rte_comp_op *op,
577 const struct rte_comp_xform *xform)
582 int ret = TEST_FAILED;
583 uint8_t *single_src_buf = NULL;
584 uint8_t *single_dst_buf = NULL;
586 /* initialize zlib stream */
587 stream.zalloc = Z_NULL;
588 stream.zfree = Z_NULL;
589 stream.opaque = Z_NULL;
592 * Window bits is the base two logarithm of the window size (in bytes).
593 * When doing raw DEFLATE, this number will be negative.
595 window_bits = -(xform->decompress.window_size);
596 ret = inflateInit2(&stream, window_bits);
/* Bug fix: this is the inflate (decompress) path, so the failure
 * message must not say "deflate". */
599 printf("Zlib inflate could not be initialized\n");
603 /* Assuming stateless operation */
/* Chained source: flatten input and stage output in linear scratch. */
605 if (op->m_src->nb_segs > 1) {
606 single_src_buf = rte_malloc(NULL,
607 rte_pktmbuf_pkt_len(op->m_src), 0);
608 if (single_src_buf == NULL) {
609 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
612 single_dst_buf = rte_malloc(NULL,
613 rte_pktmbuf_pkt_len(op->m_dst), 0);
614 if (single_dst_buf == NULL) {
615 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
618 if (rte_pktmbuf_read(op->m_src, 0,
619 rte_pktmbuf_pkt_len(op->m_src),
620 single_src_buf) == NULL) {
622 "Buffer could not be read entirely\n");
626 stream.avail_in = op->src.length;
627 stream.next_in = single_src_buf;
628 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
629 stream.next_out = single_dst_buf;
632 stream.avail_in = op->src.length;
633 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
634 stream.avail_out = op->m_dst->data_len;
635 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
638 /* Stateless operation, all buffer will be compressed in one go */
639 zlib_flush = map_zlib_flush_flag(op->flush_flag);
640 ret = inflate(&stream, zlib_flush);
642 if (stream.avail_in != 0) {
643 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
647 if (ret != Z_STREAM_END)
/* SGL case: scatter the staged output across the destination chain. */
650 if (op->m_src->nb_segs > 1) {
651 uint32_t remaining_data = stream.total_out;
652 uint8_t *src_data = single_dst_buf;
653 struct rte_mbuf *dst_buf = op->m_dst;
655 while (remaining_data > 0) {
656 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
659 if (remaining_data < dst_buf->data_len) {
660 memcpy(dst_data, src_data, remaining_data);
663 memcpy(dst_data, src_data, dst_buf->data_len);
664 remaining_data -= dst_buf->data_len;
665 src_data += dst_buf->data_len;
666 dst_buf = dst_buf->next;
671 op->consumed = stream.total_in;
672 op->produced = stream.total_out;
673 op->status = RTE_COMP_OP_STATUS_SUCCESS;
675 inflateReset(&stream);
/* Build a chained (SGL) mbuf of total_data_size bytes rooted at
 * head_buf, splitting the data into seg_size segments. When test_buf is
 * non-NULL its contents are copied across the segments; when NULL only
 * space is reserved (output-buffer case). limit_segs_in_sgl caps the
 * segment count (0 = no cap); the final segment absorbs the remainder
 * and is drawn from large_mbuf_pool when it exceeds seg_size.
 */
685 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
686 uint32_t total_data_size,
687 struct rte_mempool *small_mbuf_pool,
688 struct rte_mempool *large_mbuf_pool,
689 uint8_t limit_segs_in_sgl,
692 uint32_t remaining_data = total_data_size;
693 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
694 struct rte_mempool *pool;
695 struct rte_mbuf *next_seg;
698 const char *data_ptr = test_buf;
702 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
703 num_remaining_segs = limit_segs_in_sgl - 1;
706 * Allocate data in the first segment (header) and
707 * copy data if test buffer is provided
709 if (remaining_data < seg_size)
710 data_size = remaining_data;
712 data_size = seg_size;
714 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
715 if (buf_ptr == NULL) {
717 "Not enough space in the 1st buffer\n");
721 if (data_ptr != NULL) {
722 /* Copy characters without NULL terminator */
723 memcpy(buf_ptr, data_ptr, data_size);
724 data_ptr += data_size;
726 remaining_data -= data_size;
727 num_remaining_segs--;
730 * Allocate the rest of the segments,
731 * copy the rest of the data and chain the segments.
733 for (i = 0; i < num_remaining_segs; i++) {
735 if (i == (num_remaining_segs - 1)) {
/* Last segment takes all leftover data; pick the pool by size. */
737 if (remaining_data > seg_size)
738 pool = large_mbuf_pool;
740 pool = small_mbuf_pool;
741 data_size = remaining_data;
743 data_size = seg_size;
744 pool = small_mbuf_pool;
747 next_seg = rte_pktmbuf_alloc(pool);
748 if (next_seg == NULL) {
750 "New segment could not be allocated "
751 "from the mempool\n");
754 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
755 if (buf_ptr == NULL) {
757 "Not enough space in the buffer\n");
758 rte_pktmbuf_free(next_seg);
761 if (data_ptr != NULL) {
762 /* Copy characters without NULL terminator */
763 memcpy(buf_ptr, data_ptr, data_size);
764 data_ptr += data_size;
766 remaining_data -= data_size;
768 ret = rte_pktmbuf_chain(head_buf, next_seg);
770 rte_pktmbuf_free(next_seg);
772 "Segment could not chained\n");
/* No-op free callback for rte_pktmbuf_attach_extbuf(): the external
 * buffers are memzones owned by the test, so nothing is freed here.
 */
781 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
/* Enqueue num_bufs ops on device 0 / queue 0, then poll the dequeue
 * side until all enqueued ops come back. Between retries it sleeps
 * DEQUEUE_WAIT_TIME us, giving up after MAX_DEQD_RETRIES polls to avoid
 * spinning forever on a stuck driver. Note ops are dequeued even if the
 * enqueue was short, so nothing is left inside the device.
 */
786 test_run_enqueue_dequeue(struct rte_comp_op **ops,
787 struct rte_comp_op **ops_processed,
788 unsigned int num_bufs)
790 uint16_t num_enqd, num_deqd, num_total_deqd;
791 unsigned int deqd_retries = 0;
794 /* Enqueue and dequeue all operations */
795 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
796 if (num_enqd < num_bufs) {
798 "Some operations could not be enqueued\n");
802 /* dequeue ops even on error (same number of ops as was enqueued) */
805 while (num_total_deqd < num_enqd) {
807 * If retrying a dequeue call, wait for 10 ms to allow
808 * enough time to the driver to process the operations
810 if (deqd_retries != 0) {
812 * Avoid infinite loop if not all the
813 * operations get out of the device
815 if (deqd_retries == MAX_DEQD_RETRIES) {
817 "Not all operations could be dequeued\n");
821 usleep(DEQUEUE_WAIT_TIME);
823 num_deqd = rte_compressdev_dequeue_burst(0, 0,
824 &ops_processed[num_total_deqd], num_bufs);
825 num_total_deqd += num_deqd;
834 * Arrays initialization. Input buffers preparation for compression.
836 * API that initializes all the private arrays to NULL
837 * and allocates input buffers to perform compression operations.
840 * Interim data containing session/transformation objects.
842 * The test parameters set by users (command line parameters).
843 * @param test_priv_data
844 * A container used for aggregation all the private test arrays.
850 test_setup_com_bufs(const struct interim_data_params *int_data,
851 const struct test_data_params *test_data,
852 const struct test_private_arrays *test_priv_data)
854 /* local variables: */
859 char **all_decomp_data = test_priv_data->all_decomp_data;
861 struct comp_testsuite_params *ts_params = &testsuite_params;
864 const char * const *test_bufs = int_data->test_bufs;
865 unsigned int num_bufs = int_data->num_bufs;
867 /* from test_data: */
868 unsigned int buff_type = test_data->buff_type;
869 unsigned int big_data = test_data->big_data;
871 /* from test_priv_data: */
872 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
873 struct rte_mempool *buf_pool;
875 static struct rte_mbuf_ext_shared_info inbuf_info;
877 size_t array_size = sizeof(void *) * num_bufs;
879 /* Initialize all arrays to NULL */
880 memset(test_priv_data->uncomp_bufs, 0, array_size);
881 memset(test_priv_data->comp_bufs, 0, array_size);
882 memset(test_priv_data->ops, 0, array_size);
883 memset(test_priv_data->ops_processed, 0, array_size);
884 memset(test_priv_data->priv_xforms, 0, array_size);
885 memset(test_priv_data->compressed_data_size,
886 0, sizeof(uint32_t) * num_bufs);
/* Stateful decompression accumulates output in one contiguous buffer. */
888 if (test_data->decompress_state == RTE_COMP_OP_STATEFUL) {
889 data_size = strlen(test_bufs[0]) + 1;
890 *all_decomp_data = rte_malloc(NULL, data_size,
891 RTE_CACHE_LINE_SIZE);
/* Pool selection by test mode: big-data, small-segment SGL, or linear. */
895 buf_pool = ts_params->big_mbuf_pool;
896 else if (buff_type == SGL_BOTH)
897 buf_pool = ts_params->small_mbuf_pool;
899 buf_pool = ts_params->large_mbuf_pool;
901 /* for compression uncomp_bufs is used as a source buffer */
902 /* allocation from buf_pool (mempool type) */
903 ret = rte_pktmbuf_alloc_bulk(buf_pool,
904 uncomp_bufs, num_bufs);
907 "Source mbufs could not be allocated "
908 "from the mempool\n");
/* External-mbuf mode: attach the caller's memzone instead of pool data. */
912 if (test_data->use_external_mbufs) {
913 inbuf_info.free_cb = extbuf_free_callback;
914 inbuf_info.fcb_opaque = NULL;
915 rte_mbuf_ext_refcnt_set(&inbuf_info, 1);
916 for (i = 0; i < num_bufs; i++) {
917 rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
918 test_data->inbuf_memzone->addr,
919 test_data->inbuf_memzone->iova,
920 test_data->inbuf_data_size,
922 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i],
923 test_data->inbuf_data_size);
924 if (buf_ptr == NULL) {
926 "Append extra bytes to the source mbuf failed\n");
930 } else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
931 for (i = 0; i < num_bufs; i++) {
932 data_size = strlen(test_bufs[i]) + 1;
933 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
935 big_data ? buf_pool : ts_params->small_mbuf_pool,
936 big_data ? buf_pool : ts_params->large_mbuf_pool,
937 big_data ? 0 : MAX_SEGS,
938 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
/* Linear source: append and copy each test vector (incl. NUL). */
942 for (i = 0; i < num_bufs; i++) {
943 data_size = strlen(test_bufs[i]) + 1;
945 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
946 if (buf_ptr == NULL) {
948 "Append extra bytes to the source mbuf failed\n");
951 strlcpy(buf_ptr, test_bufs[i], data_size);
959 * Data size calculation (for both compression and decompression).
961 * Calculate size of anticipated output buffer required for both
962 * compression and decompression operations based on input int_data.
965 * Operation type: compress or decompress
966 * @param out_of_space_and_zlib
967 * Boolean value to switch into "out of space" buffer if set.
968 * To test "out-of-space" data size, zlib_decompress must be set as well.
969 * @param test_priv_data
970 * A container used for aggregation all the private test arrays.
972 * Interim data containing session/transformation objects.
974 * The test parameters set by users (command line parameters).
976 * current buffer index
980 static inline uint32_t
981 test_mbufs_calculate_data_size(
982 enum operation_type op_type,
983 unsigned int out_of_space_and_zlib,
984 const struct test_private_arrays *test_priv_data,
985 const struct interim_data_params *int_data,
986 const struct test_data_params *test_data,
989 /* local variables: */
991 struct priv_op_data *priv_data;
993 enum ratio_switch ratio = test_data->ratio;
995 uint8_t not_zlib_compr; /* true if zlib isn't current compression dev */
996 enum overflow_test overflow = test_data->overflow;
998 /* from test_priv_data: */
999 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1001 /* from int_data: */
1002 const char * const *test_bufs = int_data->test_bufs;
/* Out-of-space mode: force a 1-byte destination to provoke the error. */
1004 if (out_of_space_and_zlib)
1005 data_size = OUT_OF_SPACE_BUF;
/* Compression: output = input length scaled by the selected ratio
 * (1.3 headroom, 1.0 disabled, or 0.2 to force an overflow on the
 * PMD path). */
1007 if (op_type == OPERATION_COMPRESSION) {
1008 not_zlib_compr = (test_data->zlib_dir == ZLIB_DECOMPRESS
1009 || test_data->zlib_dir == ZLIB_NONE);
1011 ratio_val = (ratio == RATIO_ENABLED) ?
1012 COMPRESS_BUF_SIZE_RATIO :
1013 COMPRESS_BUF_SIZE_RATIO_DISABLED;
1015 ratio_val = (not_zlib_compr &&
1016 (overflow == OVERFLOW_ENABLED)) ?
1017 COMPRESS_BUF_SIZE_RATIO_OVERFLOW :
1020 data_size = strlen(test_bufs[i]) * ratio_val;
/* Decompression: output must hold the original vector, found via the
 * orig_idx stored in the processed op's private data. */
1022 priv_data = (struct priv_op_data *)
1023 (ops_processed[i] + 1);
1024 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
1033 * Memory buffers preparation (for both compression and decompression).
1035 * Function allocates output buffers to perform compression
1036 * or decompression operations depending on value of op_type.
1039 * Operation type: compress or decompress
1040 * @param out_of_space_and_zlib
1041 * Boolean value to switch into "out of space" buffer if set.
1042 * To test "out-of-space" data size, zlib_decompress must be set as well.
1043 * @param test_priv_data
1044 * A container used for aggregation all the private test arrays.
1046 * Interim data containing session/transformation objects.
1048 * The test parameters set by users (command line parameters).
1049 * @param current_extbuf_info,
1050 * The structure containing all the information related to external mbufs
1056 test_setup_output_bufs(
1057 enum operation_type op_type,
1058 unsigned int out_of_space_and_zlib,
1059 const struct test_private_arrays *test_priv_data,
1060 const struct interim_data_params *int_data,
1061 const struct test_data_params *test_data,
1062 struct rte_mbuf_ext_shared_info *current_extbuf_info)
1064 /* local variables: */
1070 /* from test_priv_data: */
1071 struct rte_mbuf **current_bufs;
1073 /* from int_data: */
1074 unsigned int num_bufs = int_data->num_bufs;
1076 /* from test_data: */
1077 unsigned int buff_type = test_data->buff_type;
1078 unsigned int big_data = test_data->big_data;
1079 const struct rte_memzone *current_memzone;
1081 struct comp_testsuite_params *ts_params = &testsuite_params;
1082 struct rte_mempool *buf_pool;
/* Same pool-selection policy as test_setup_com_bufs(). */
1085 buf_pool = ts_params->big_mbuf_pool;
1086 else if (buff_type == SGL_BOTH)
1087 buf_pool = ts_params->small_mbuf_pool;
1089 buf_pool = ts_params->large_mbuf_pool;
/* Compression writes into comp_bufs; decompression reuses uncomp_bufs
 * as its destination. */
1091 if (op_type == OPERATION_COMPRESSION)
1092 current_bufs = test_priv_data->comp_bufs;
1094 current_bufs = test_priv_data->uncomp_bufs;
1096 /* the mbufs allocation*/
1097 ret = rte_pktmbuf_alloc_bulk(buf_pool, current_bufs, num_bufs);
1100 "Destination mbufs could not be allocated "
1101 "from the mempool\n");
/* External-mbuf mode: attach the direction-appropriate memzone. */
1105 if (test_data->use_external_mbufs) {
1106 current_extbuf_info->free_cb = extbuf_free_callback;
1107 current_extbuf_info->fcb_opaque = NULL;
1108 rte_mbuf_ext_refcnt_set(current_extbuf_info, 1);
1109 if (op_type == OPERATION_COMPRESSION)
1110 current_memzone = test_data->compbuf_memzone;
1112 current_memzone = test_data->uncompbuf_memzone;
1114 for (i = 0; i < num_bufs; i++) {
1115 rte_pktmbuf_attach_extbuf(current_bufs[i],
1116 current_memzone->addr,
1117 current_memzone->iova,
1118 current_memzone->len,
1119 current_extbuf_info);
1120 rte_pktmbuf_append(current_bufs[i],
1121 current_memzone->len);
1124 for (i = 0; i < num_bufs; i++) {
1126 enum rte_comp_huffman comp_huffman =
1127 ts_params->def_comp_xform->compress.deflate.huffman;
1129 /* data size calculation */
1130 data_size = test_mbufs_calculate_data_size(
1132 out_of_space_and_zlib,
/* Non-dynamic huffman decompression gets extra headroom on top of
 * the calculated size. */
1138 if (comp_huffman != RTE_COMP_HUFFMAN_DYNAMIC) {
1139 if (op_type == OPERATION_DECOMPRESSION)
1140 data_size *= COMPRESS_BUF_SIZE_RATIO;
1143 /* data allocation */
1144 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1145 ret = prepare_sgl_bufs(NULL, current_bufs[i],
1147 big_data ? buf_pool :
1148 ts_params->small_mbuf_pool,
1149 big_data ? buf_pool :
1150 ts_params->large_mbuf_pool,
1151 big_data ? 0 : MAX_SEGS,
1152 big_data ? MAX_DATA_MBUF_SIZE :
1157 buf_ptr = rte_pktmbuf_append(current_bufs[i],
1159 if (buf_ptr == NULL) {
1161 "Append extra bytes to the destination mbuf failed\n");
1172 * The main compression function.
1174 * Function performs compression operation.
1175 * Operation(s) configuration, depending on CLI parameters.
1176 * Operation(s) processing.
1179 * Interim data containing session/transformation objects.
1181 * The test parameters set by users (command line parameters).
1182 * @param test_priv_data
1183 * A container used for aggregation all the private test arrays.
1189 test_deflate_comp_run(const struct interim_data_params *int_data,
1190 const struct test_data_params *test_data,
1191 const struct test_private_arrays *test_priv_data)
1193 /* local variables: */
1194 struct priv_op_data *priv_data;
1196 uint16_t num_priv_xforms = 0;
1201 struct comp_testsuite_params *ts_params = &testsuite_params;
1203 /* from test_data: */
1204 enum rte_comp_op_type operation_type = test_data->compress_state;
1205 unsigned int zlib_compress =
1206 (test_data->zlib_dir == ZLIB_ALL ||
1207 test_data->zlib_dir == ZLIB_COMPRESS);
1209 /* from int_data: */
1210 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1211 unsigned int num_xforms = int_data->num_xforms;
1212 unsigned int num_bufs = int_data->num_bufs;
1214 /* from test_priv_data: */
1215 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
1216 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1217 struct rte_comp_op **ops = test_priv_data->ops;
1218 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1219 void **priv_xforms = test_priv_data->priv_xforms;
1221 const struct rte_compressdev_capabilities *capa =
1222 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1224 /* Build the compression operations */
1225 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1228 "Compress operations could not be allocated "
1229 "from the mempool\n");
/* Wire each op: full source mbuf in, destination mbuf out. */
1234 for (i = 0; i < num_bufs; i++) {
1235 ops[i]->m_src = uncomp_bufs[i];
1236 ops[i]->m_dst = comp_bufs[i];
1237 ops[i]->src.offset = 0;
1238 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
1239 ops[i]->dst.offset = 0;
1241 RTE_LOG(DEBUG, USER1,
1242 "Uncompressed buffer length = %u compressed buffer length = %u",
1243 rte_pktmbuf_pkt_len(uncomp_bufs[i]),
1244 rte_pktmbuf_pkt_len(comp_bufs[i]));
1246 if (operation_type == RTE_COMP_OP_STATELESS) {
1247 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1250 "Compression: stateful operations are not "
1251 "supported in these tests yet\n");
1255 ops[i]->input_chksum = 0;
1257 * Store original operation index in private data,
1258 * since ordering does not have to be maintained,
1259 * when dequeueing from compressdev, so a comparison
1260 * at the end of the test can be done.
1262 priv_data = (struct priv_op_data *) (ops[i] + 1);
1263 priv_data->orig_idx = i;
1266 /* Compress data (either with Zlib API or compressdev API */
1267 if (zlib_compress) {
/* zlib reference path: xforms are cycled over the buffers. */
1268 for (i = 0; i < num_bufs; i++) {
1269 const struct rte_comp_xform *compress_xform =
1270 compress_xforms[i % num_xforms];
1271 ret = compress_zlib(ops[i], compress_xform,
1278 ops_processed[i] = ops[i];
1281 /* Create compress private xform data */
1282 for (i = 0; i < num_xforms; i++) {
1283 ret = rte_compressdev_private_xform_create(0,
1284 (const struct rte_comp_xform *)
1289 "Compression private xform "
1290 "could not be created\n");
/* Shareable xforms can be attached to many ops; otherwise one
 * private xform must be created per op. */
1296 if (capa->comp_feature_flags &
1297 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1298 /* Attach shareable private xform data to ops */
1299 for (i = 0; i < num_bufs; i++)
1300 ops[i]->private_xform =
1301 priv_xforms[i % num_xforms];
1303 /* Create rest of the private xforms for the other ops */
1304 for (i = num_xforms; i < num_bufs; i++) {
1305 ret = rte_compressdev_private_xform_create(0,
1306 compress_xforms[i % num_xforms],
1310 "Compression private xform "
1311 "could not be created\n");
1317 /* Attach non shareable private xform data to ops */
1318 for (i = 0; i < num_bufs; i++)
1319 ops[i]->private_xform = priv_xforms[i];
1323 ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs);
1326 "Compression: enqueue/dequeue operation failed\n");
/* Accumulate produced sizes; on a recoverable out-of-space status,
 * advance src/dst offsets past the consumed/produced bytes and grow
 * the destination so the op can be resubmitted. */
1331 for (i = 0; i < num_bufs; i++) {
1332 test_priv_data->compressed_data_size[i] +=
1333 ops_processed[i]->produced;
1335 if (ops_processed[i]->status ==
1336 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
1339 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1340 ops[i]->src.offset +=
1341 ops_processed[i]->consumed;
1342 ops[i]->src.length -=
1343 ops_processed[i]->consumed;
1344 ops[i]->dst.offset +=
1345 ops_processed[i]->produced;
1347 buf_ptr = rte_pktmbuf_append(
1349 ops_processed[i]->produced);
1351 if (buf_ptr == NULL) {
1353 "Data recovery: append extra bytes to the current mbuf failed\n");
1363 /* Free resources */
1365 for (i = 0; i < num_bufs; i++) {
1366 rte_comp_op_free(ops[i]);
1368 ops_processed[i] = NULL;
1371 /* Free compress private xforms */
1372 for (i = 0; i < num_priv_xforms; i++) {
1373 if (priv_xforms[i] != NULL) {
1374 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1375 priv_xforms[i] = NULL;
1383 * Prints out the test report. Memory freeing.
1385 * Called after successful compression.
1386 * Operation(s) status validation and decompression buffers freeing.
1388 * -1 returned if function fail.
1391 * Interim data containing session/transformation objects.
1393 * The test parameters set by users (command line parameters).
1394 * @param test_priv_data
1395 * A container used for aggregating all the private test arrays.
1397 * - 2: Some operation is not supported
1398 * - 1: Decompression should be skipped
/*
 * Validates and reports the results of the compression stage.
 *
 * Logs per-buffer compression statistics (sizes, level, Huffman type and
 * ratio), records the per-op output checksum when a checksum xform was
 * configured, checks each operation's status, and frees the source
 * (uncompressed) mbufs. Destination mbufs and op data are intentionally
 * kept alive for the decompression stage.
 *
 * NOTE(review): this excerpt is elided — several original lines (opening
 * brace, error/return paths, closing brace) are not visible here; the
 * return-code contract (-1 fail / 1 skip decompression / 2 unsupported)
 * comes from the preceding doc comment and should be confirmed.
 */
1403 test_deflate_comp_finalize(const struct interim_data_params *int_data,
1404 const struct test_data_params *test_data,
1405 const struct test_private_arrays *test_priv_data)
1407 /* local variables: */
1409 struct priv_op_data *priv_data;
1411 /* from int_data: */
1412 unsigned int num_xforms = int_data->num_xforms;
1413 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1414 unsigned int num_bufs = int_data->num_bufs;
1416 /* from test_priv_data: */
1417 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1418 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1419 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1420 struct rte_comp_op **ops = test_priv_data->ops;
1422 /* from test_data: */
1423 unsigned int out_of_space = test_data->out_of_space;
/* Flags derived from the CLI-selected zlib direction: which stages are
 * performed directly with zlib instead of the compressdev PMD. */
1424 unsigned int zlib_compress =
1425 (test_data->zlib_dir == ZLIB_ALL ||
1426 test_data->zlib_dir == ZLIB_COMPRESS);
1427 unsigned int zlib_decompress =
1428 (test_data->zlib_dir == ZLIB_ALL ||
1429 test_data->zlib_dir == ZLIB_DECOMPRESS);
/* Per-buffer reporting pass: each op's private area (ops_processed[i] + 1)
 * holds the original buffer index, used to look up the xform it ran with. */
1431 for (i = 0; i < num_bufs; i++) {
1432 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1433 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1434 const struct rte_comp_compress_xform *compress_xform =
1435 &compress_xforms[xform_idx]->compress;
1436 enum rte_comp_huffman huffman_type =
1437 compress_xform->deflate.huffman;
1438 char engine[] = "zlib (directly, not PMD)";
1439 if (zlib_decompress)
1440 strlcpy(engine, "PMD", sizeof(engine));
1442 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
1443 " %u bytes (level = %d, huffman = %s)\n",
1445 ops_processed[i]->consumed, ops_processed[i]->produced,
1446 compress_xform->level,
1447 huffman_type_strings[huffman_type]);
1448 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
1449 ops_processed[i]->consumed == 0 ? 0 :
1450 (float)ops_processed[i]->produced /
1451 ops_processed[i]->consumed * 100);
/* Save the produced checksum so the decompression stage can verify it. */
1452 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
1453 compress_checksum[i] = ops_processed[i]->output_chksum;
1458 * Check operation status and free source mbufs (destination mbuf and
1459 * compress operation information is needed for the decompression stage)
1461 for (i = 0; i < num_bufs; i++) {
/* Negative out-of-space test: every op must have been terminated with
 * the OUT_OF_SPACE status, anything else is a failure. */
1462 if (out_of_space && !zlib_compress) {
1463 if (ops_processed[i]->status !=
1464 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1466 "Operation without expected out of "
1467 "space status error\n");
1473 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
/* Overflow tests tolerate OUT_OF_SPACE_TERMINATED: it means the
 * device lacks the recoverable out-of-space feature. */
1474 if (test_data->overflow == OVERFLOW_ENABLED) {
1475 if (ops_processed[i]->status ==
1476 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1477 RTE_LOG(INFO, USER1,
1478 "Out-of-space-recoverable functionality"
1479 " is not supported on this device\n");
1485 "Comp: Some operations were not successful\n");
/* Source mbufs are no longer needed once compression succeeded. */
1488 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1489 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1490 uncomp_bufs[priv_data->orig_idx] = NULL;
/* For out-of-space tests the flow ends here (decompression skipped). */
1493 if (out_of_space && !zlib_compress)
1500 * The main decompression function.
1502 * Function performs decompression operation.
1503 * Operation(s) configuration, depending on CLI parameters.
1504 * Operation(s) processing.
1507 * Interim data containing session/transformation objects.
1509 * The test parameters set by users (command line parameters).
1510 * @param test_priv_data
1511 * A container used for aggregating all the private test arrays.
/*
 * Builds and runs the decompression operations.
 *
 * Uses the compression stage's destination mbufs as sources, configures
 * each op for stateless or stateful operation, sets up private xforms
 * (shareable or per-op non-shareable) for stateless decompression, or a
 * single stream object for stateful decompression, then records the number
 * of private xforms created so the caller can free them.
 *
 * NOTE(review): this excerpt is elided — error/goto paths and some
 * statements (e.g. where num_priv_xforms is incremented) are not visible.
 */
1517 test_deflate_decomp_run(const struct interim_data_params *int_data,
1518 const struct test_data_params *test_data,
1519 struct test_private_arrays *test_priv_data)
1522 /* local variables: */
1523 struct priv_op_data *priv_data;
/* Count of xforms created here; published to test_priv_data at the end
 * so test_deflate_comp_decomp() can free them in its cleanup path. */
1525 uint16_t num_priv_xforms = 0;
1529 struct comp_testsuite_params *ts_params = &testsuite_params;
1531 /* from test_data: */
1532 enum rte_comp_op_type operation_type = test_data->decompress_state;
1533 unsigned int zlib_decompress =
1534 (test_data->zlib_dir == ZLIB_ALL ||
1535 test_data->zlib_dir == ZLIB_DECOMPRESS);
1537 /* from int_data: */
1538 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
1539 unsigned int num_xforms = int_data->num_xforms;
1540 unsigned int num_bufs = int_data->num_bufs;
1542 /* from test_priv_data: */
1543 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1544 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
1545 struct rte_comp_op **ops = test_priv_data->ops;
1546 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1547 void **priv_xforms = test_priv_data->priv_xforms;
1548 uint32_t *compressed_data_size = test_priv_data->compressed_data_size;
1549 void **stream = test_priv_data->stream;
1551 const struct rte_compressdev_capabilities *capa =
1552 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1554 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1557 "Decompress operations could not be allocated "
1558 "from the mempool\n");
1563 /* Source buffer is the compressed data from the previous operations */
1564 for (i = 0; i < num_bufs; i++) {
1565 ops[i]->m_src = comp_bufs[i];
1566 ops[i]->m_dst = uncomp_bufs[i];
1567 ops[i]->src.offset = 0;
1569 * Set the length of the compressed data to the
1570 * number of bytes that were produced in the previous stage
/* compressed_data_size[i] is the total accumulated by the compression
 * stage (see the comp-run accounting); presumably non-zero only when
 * out-of-space recovery appended extra output — TODO confirm. */
1573 if (compressed_data_size[i])
1574 ops[i]->src.length = compressed_data_size[i];
1576 ops[i]->src.length = ops_processed[i]->produced;
1578 ops[i]->dst.offset = 0;
1580 if (operation_type == RTE_COMP_OP_STATELESS) {
1581 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1582 ops[i]->op_type = RTE_COMP_OP_STATELESS;
1583 } else if (!zlib_decompress) {
/* Stateful path: partial flush; the op may be resubmitted. */
1584 ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
1585 ops[i]->op_type = RTE_COMP_OP_STATEFUL;
1588 "Decompression: stateful operations are"
1589 " not supported in these tests yet\n");
1593 ops[i]->input_chksum = 0;
1595 * Copy private data from previous operations,
1596 * to keep the pointer to the original buffer
1598 memcpy(ops[i] + 1, ops_processed[i] + 1,
1599 sizeof(struct priv_op_data));
1603 * Free the previous compress operations,
1604 * as they are not needed anymore
1606 rte_comp_op_bulk_free(ops_processed, num_bufs);
1608 /* Decompress data (either with Zlib API or compressdev API) */
1609 if (zlib_decompress) {
1610 for (i = 0; i < num_bufs; i++) {
1611 priv_data = (struct priv_op_data *)(ops[i] + 1);
1612 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1613 const struct rte_comp_xform *decompress_xform =
1614 decompress_xforms[xform_idx];
1616 ret = decompress_zlib(ops[i], decompress_xform);
1622 ops_processed[i] = ops[i];
1625 if (operation_type == RTE_COMP_OP_STATELESS) {
1626 /* Create decompress private xform data */
1627 for (i = 0; i < num_xforms; i++) {
1628 ret = rte_compressdev_private_xform_create(0,
1629 (const struct rte_comp_xform *)
1630 decompress_xforms[i],
1634 "Decompression private xform "
1635 "could not be created\n");
1642 if (capa->comp_feature_flags &
1643 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1644 /* Attach shareable private xform data to ops */
1645 for (i = 0; i < num_bufs; i++) {
1646 priv_data = (struct priv_op_data *)
1648 uint16_t xform_idx =
1649 priv_data->orig_idx % num_xforms;
1650 ops[i]->private_xform =
1651 priv_xforms[xform_idx];
1654 /* Create rest of the private xforms */
1655 /* for the other ops */
/* Non-shareable case: each op needs its own xform instance,
 * so create (num_bufs - num_xforms) additional ones. */
1656 for (i = num_xforms; i < num_bufs; i++) {
1658 rte_compressdev_private_xform_create(0,
1659 decompress_xforms[i % num_xforms],
1663 "Decompression private xform"
1664 " could not be created\n");
1671 /* Attach non shareable private xform data */
1673 for (i = 0; i < num_bufs; i++) {
1674 priv_data = (struct priv_op_data *)
1676 uint16_t xform_idx =
1677 priv_data->orig_idx;
1678 ops[i]->private_xform =
1679 priv_xforms[xform_idx];
1683 /* Create a stream object for stateful decompression */
1684 ret = rte_compressdev_stream_create(0,
1685 decompress_xforms[0], stream);
1688 "Decompression stream could not be created, error %d\n",
1693 /* Attach stream to ops */
1694 for (i = 0; i < num_bufs; i++)
1695 ops[i]->stream = *stream;
/* Publish the xform count for the caller's cleanup. */
1698 test_priv_data->num_priv_xforms = num_priv_xforms;
1706 * Prints out the test report. Memory freeing.
1708 * Called after successful decompression.
1709 * Operation(s) status validation and compression buffers freeing.
1711 * -1 returned if function fail.
1714 * Interim data containing session/transformation objects.
1716 * The test parameters set by users (command line parameters).
1717 * @param test_priv_data
1718 * A container used for aggregating all the private test arrays.
1720 * - 2: Next step must be executed by the caller (stateful decompression only)
1721 * - 1: On success (caller should stop and exit)
/*
 * Validates the results of the decompression stage.
 *
 * Logs per-buffer statistics, checks each op's status, and frees the
 * source (compressed) mbufs. For stateful decompression with recoverable
 * out-of-space, the produced output is accumulated into all_decomp_data
 * and the op is re-armed so the caller can resubmit it (return code 2 per
 * the preceding doc comment); once fully consumed, the accumulated output
 * is compared against the original test buffer and checksums are checked.
 *
 * NOTE(review): this excerpt is elided — error/return paths, the 'step'
 * increment, and closing braces are not visible here.
 */
1726 test_deflate_decomp_finalize(const struct interim_data_params *int_data,
1727 const struct test_data_params *test_data,
1728 const struct test_private_arrays *test_priv_data)
1730 /* local variables: */
1732 struct priv_op_data *priv_data;
/* NOTE(review): 'step' is function-static, so it persists across calls
 * (and across test cases). A reset is not visible in this excerpt —
 * confirm it is re-initialised between stateful tests. */
1733 static unsigned int step;
1735 /* from int_data: */
1736 unsigned int num_bufs = int_data->num_bufs;
1737 const char * const *test_bufs = int_data->test_bufs;
1738 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1740 /* from test_priv_data: */
1741 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1742 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
1743 struct rte_comp_op **ops = test_priv_data->ops;
1744 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1745 unsigned int *decomp_produced_data_size =
1746 test_priv_data->decomp_produced_data_size;
1747 char **all_decomp_data = test_priv_data->all_decomp_data;
1749 /* from test_data: */
1750 unsigned int out_of_space = test_data->out_of_space;
1751 enum rte_comp_op_type operation_type = test_data->decompress_state;
1753 unsigned int zlib_compress =
1754 (test_data->zlib_dir == ZLIB_ALL ||
1755 test_data->zlib_dir == ZLIB_COMPRESS);
1756 unsigned int zlib_decompress =
1757 (test_data->zlib_dir == ZLIB_ALL ||
1758 test_data->zlib_dir == ZLIB_DECOMPRESS);
1760 for (i = 0; i < num_bufs; i++) {
1761 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1762 char engine[] = "zlib, (directly, no PMD)";
1764 strlcpy(engine, "pmd", sizeof(engine));
1765 RTE_LOG(DEBUG, USER1,
1766 "Buffer %u decompressed by %s from %u to %u bytes\n",
1768 ops_processed[i]->consumed, ops_processed[i]->produced);
1773 * Check operation status and free source mbuf (destination mbuf and
1774 * compress operation information is still needed)
1776 for (i = 0; i < num_bufs; i++) {
/* Negative out-of-space test: OUT_OF_SPACE_TERMINATED is required. */
1777 if (out_of_space && !zlib_decompress) {
1778 if (ops_processed[i]->status !=
1779 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1782 "Operation without expected out of "
1783 "space status error\n");
/* Stateful path: both SUCCESS and recoverable out-of-space mean some
 * output was produced and must be collected. */
1789 if (operation_type == RTE_COMP_OP_STATEFUL
1790 && (ops_processed[i]->status ==
1791 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
1792 || ops_processed[i]->status ==
1793 RTE_COMP_OP_STATUS_SUCCESS)) {
1795 RTE_LOG(DEBUG, USER1,
1796 ".............RECOVERABLE\n");
1798 /* collect the output into all_decomp_data */
1799 const void *ptr = rte_pktmbuf_read(
1800 ops_processed[i]->m_dst,
1801 ops_processed[i]->dst.offset,
1802 ops_processed[i]->produced,
1804 *decomp_produced_data_size);
/* rte_pktmbuf_read() only copies when the data is not already
 * contiguous at the supplied buffer; copy only in that case. */
1805 if (ptr != *all_decomp_data +
1806 *decomp_produced_data_size)
1807 rte_memcpy(*all_decomp_data +
1808 *decomp_produced_data_size,
1809 ptr, ops_processed[i]->produced);
1811 *decomp_produced_data_size +=
1812 ops_processed[i]->produced;
/* Input not fully consumed yet: SUCCESS here would be premature;
 * otherwise re-arm the op past the consumed bytes and resubmit. */
1813 if (ops_processed[i]->src.length >
1814 ops_processed[i]->consumed) {
1815 if (ops_processed[i]->status ==
1816 RTE_COMP_OP_STATUS_SUCCESS) {
1818 "Operation finished too early\n");
1822 if (step >= test_data->decompress_steps_max) {
1824 "Operation exceeded maximum steps\n");
1827 ops[i] = ops_processed[i];
1829 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1830 ops[i]->src.offset +=
1831 ops_processed[i]->consumed;
1832 ops[i]->src.length -=
1833 ops_processed[i]->consumed;
1834 /* repeat the operation */
1837 /* Compare the original stream with the */
1838 /* decompressed stream (in size and the data) */
1839 priv_data = (struct priv_op_data *)
1840 (ops_processed[i] + 1);
1842 test_bufs[priv_data->orig_idx];
1843 const char *buf2 = *all_decomp_data;
1845 if (compare_buffers(buf1, strlen(buf1) + 1,
1846 buf2, *decomp_produced_data_size) < 0)
1848 /* Test checksums */
1849 if (compress_xforms[0]->compress.chksum
1850 != RTE_COMP_CHECKSUM_NONE) {
1851 if (ops_processed[i]->output_chksum
1852 != compress_checksum[i]) {
1854 "The checksums differ\n"
1855 "Compression Checksum: %" PRIu64 "\tDecompression "
1856 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1857 ops_processed[i]->output_chksum);
1862 } else if (ops_processed[i]->status !=
1863 RTE_COMP_OP_STATUS_SUCCESS) {
1865 "Decomp: Some operations were not successful, status = %u\n",
1866 ops_processed[i]->status);
/* Compressed source mbufs are no longer needed. */
1869 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1870 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1871 comp_bufs[priv_data->orig_idx] = NULL;
/* For out-of-space tests the flow ends here (validation skipped). */
1874 if (out_of_space && !zlib_decompress)
1881 * Validation of the output (compression/decompression) data.
1883 * The function compares the source stream with the output stream,
1884 * after decompression, to check if compression/decompression
1886 * -1 returned if function fail.
1889 * Interim data containing session/transformation objects.
1891 * The test parameters set by users (command line parameters).
1892 * @param test_priv_data
1893 * A container used for aggregating all the private test arrays.
/*
 * Final validation of the decompressed output.
 *
 * For every buffer: reads the decompressed mbuf into a contiguous scratch
 * buffer, compares it byte-for-byte (and by size) against the original
 * input, and, when a checksum xform was configured, verifies that the
 * decompression checksum matches the one recorded during compression.
 *
 * NOTE(review): this excerpt is elided — return statements, the error
 * label(s) targeted by the failure paths, and some declarations (buf1,
 * buf2, data_size, i) are not visible here.
 */
1899 test_results_validation(const struct interim_data_params *int_data,
1900 const struct test_data_params *test_data,
1901 const struct test_private_arrays *test_priv_data)
1903 /* local variables: */
1905 struct priv_op_data *priv_data;
1908 char *contig_buf = NULL;
1911 /* from int_data: */
1912 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1913 unsigned int num_bufs = int_data->num_bufs;
1914 const char * const *test_bufs = int_data->test_bufs;
1916 /* from test_priv_data: */
1917 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1918 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1921 * Compare the original stream with the decompressed stream
1922 * (in size and the data)
1924 for (i = 0; i < num_bufs; i++) {
1925 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
/* External-mbuf tests take input from a memzone; otherwise use the
 * original test buffer identified by the op's recorded index. */
1926 buf1 = test_data->use_external_mbufs ?
1927 test_data->inbuf_memzone->addr :
1928 test_bufs[priv_data->orig_idx];
1929 data_size = test_data->use_external_mbufs ?
1930 test_data->inbuf_data_size :
/* Scratch buffer sized to the produced output, in case the mbuf
 * chain is segmented and rte_pktmbuf_read() must copy. */
1933 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1934 if (contig_buf == NULL) {
1935 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1940 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1941 ops_processed[i]->produced, contig_buf);
1942 if (compare_buffers(buf1, data_size,
1943 buf2, ops_processed[i]->produced) < 0)
1946 /* Test checksums */
1947 if (compress_xforms[0]->compress.chksum !=
1948 RTE_COMP_CHECKSUM_NONE) {
1949 if (ops_processed[i]->output_chksum !=
1950 compress_checksum[i]) {
1951 RTE_LOG(ERR, USER1, "The checksums differ\n"
1952 "Compression Checksum: %" PRIu64 "\tDecompression "
1953 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1954 ops_processed[i]->output_chksum);
/* Scratch buffer released per iteration and again on the exit path
 * (rte_free(NULL) is a no-op). */
1959 rte_free(contig_buf);
1965 rte_free(contig_buf);
1970 * Compresses and decompresses input stream with compressdev API and Zlib API
1972 * Basic test function. Common for all the functional tests.
1973 * -1 returned if function fail.
1976 * Interim data containing session/transformation objects.
1978 * The test parameters set by users (command line parameters).
1980 * - 1: Some operation not supported
/*
 * Main compress/decompress driver shared by all functional tests.
 *
 * Orchestrates the full pipeline: source-mbuf setup, destination-mbuf
 * setup, compression run + finalize, decompression run + (possibly
 * repeated, for stateful) enqueue/dequeue + finalize, result validation,
 * and unconditional resource cleanup.
 *
 * NOTE(review): this excerpt is elided — goto labels/exits, some error
 * branches, and several declarations (i, ret) are not visible here.
 */
1986 test_deflate_comp_decomp(const struct interim_data_params *int_data,
1987 const struct test_data_params *test_data)
1989 unsigned int num_bufs = int_data->num_bufs;
1990 unsigned int out_of_space = test_data->out_of_space;
1992 void *stream = NULL;
1993 char *all_decomp_data = NULL;
1994 unsigned int decomp_produced_data_size = 0;
1996 int ret_status = -1;
/* VLAs sized by the number of test buffers; num_bufs is small in these
 * tests (bounded by the test-case definitions). */
1998 struct rte_mbuf *uncomp_bufs[num_bufs];
1999 struct rte_mbuf *comp_bufs[num_bufs];
2000 struct rte_comp_op *ops[num_bufs];
2001 struct rte_comp_op *ops_processed[num_bufs];
2002 void *priv_xforms[num_bufs];
2005 uint64_t compress_checksum[num_bufs];
2006 uint32_t compressed_data_size[num_bufs];
2007 char *contig_buf = NULL;
2009 struct rte_mbuf_ext_shared_info compbuf_info;
2010 struct rte_mbuf_ext_shared_info decompbuf_info;
2012 const struct rte_compressdev_capabilities *capa;
2014 /* Compressing with CompressDev */
2015 unsigned int zlib_compress =
2016 (test_data->zlib_dir == ZLIB_ALL ||
2017 test_data->zlib_dir == ZLIB_COMPRESS);
2018 unsigned int zlib_decompress =
2019 (test_data->zlib_dir == ZLIB_ALL ||
2020 test_data->zlib_dir == ZLIB_DECOMPRESS);
/* Aggregate all per-stage arrays so helper stages share one container. */
2022 struct test_private_arrays test_priv_data;
2024 test_priv_data.uncomp_bufs = uncomp_bufs;
2025 test_priv_data.comp_bufs = comp_bufs;
2026 test_priv_data.ops = ops;
2027 test_priv_data.ops_processed = ops_processed;
2028 test_priv_data.priv_xforms = priv_xforms;
2029 test_priv_data.compress_checksum = compress_checksum;
2030 test_priv_data.compressed_data_size = compressed_data_size;
2032 test_priv_data.stream = &stream;
2033 test_priv_data.all_decomp_data = &all_decomp_data;
2034 test_priv_data.decomp_produced_data_size = &decomp_produced_data_size;
2036 test_priv_data.num_priv_xforms = 0; /* it's used for decompression only */
2038 capa = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2041 "Compress device does not support DEFLATE\n");
2045 /* Prepare the source mbufs with the data */
2046 ret = test_setup_com_bufs(int_data, test_data, &test_priv_data);
2052 RTE_LOG(DEBUG, USER1, "<<< COMPRESSION >>>\n");
2056 /* Prepare output (destination) mbufs for compressed data */
2057 ret = test_setup_output_bufs(
2058 OPERATION_COMPRESSION,
2059 out_of_space == 1 && !zlib_compress,
2069 /* Run compression */
2070 ret = test_deflate_comp_run(int_data, test_data, &test_priv_data);
2076 ret = test_deflate_comp_finalize(int_data, test_data, &test_priv_data);
/* comp_finalize return codes: 1 = skip decompression (negative test
 * succeeded), 2 = operation not supported on this device. */
2080 } else if (ret == 1) {
2083 } else if (ret == 2) {
2084 ret_status = 1; /* some operation not supported */
2090 RTE_LOG(DEBUG, USER1, "<<< DECOMPRESSION >>>\n");
2092 /* Prepare output (destination) mbufs for decompressed data */
2093 ret = test_setup_output_bufs(
2094 OPERATION_DECOMPRESSION,
2095 out_of_space == 1 && !zlib_decompress,
2105 /* Run decompression */
2106 ret = test_deflate_decomp_run(int_data, test_data, &test_priv_data);
2112 if (!zlib_decompress) {
/* Stateful decompression re-enters here each time decomp_finalize
 * asks for another enqueue/dequeue round (return code 2). */
2113 next_step: /* next step for stateful decompression only */
2114 ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs);
2118 "Decompression: enqueue/dequeue operation failed\n");
2122 ret = test_deflate_decomp_finalize(int_data, test_data, &test_priv_data);
2126 } else if (ret == 1) {
2129 } else if (ret == 2) {
2133 /* FINAL PROCESSING */
2135 ret = test_results_validation(int_data, test_data, &test_priv_data);
2143 /* Free resources */
2146 rte_compressdev_stream_free(0, stream);
2147 if (all_decomp_data != NULL)
2148 rte_free(all_decomp_data);
2150 /* Free compress private xforms */
2151 for (i = 0; i < test_priv_data.num_priv_xforms; i++) {
2152 if (priv_xforms[i] != NULL) {
2153 rte_compressdev_private_xform_free(0, priv_xforms[i]);
2154 priv_xforms[i] = NULL;
2157 for (i = 0; i < num_bufs; i++) {
2158 rte_pktmbuf_free(uncomp_bufs[i]);
2159 rte_pktmbuf_free(comp_bufs[i]);
2160 rte_comp_op_free(ops[i]);
2161 rte_comp_op_free(ops_processed[i]);
2163 rte_free(contig_buf);
/*
 * Test case: stateless compression with fixed Huffman coding.
 *
 * Skipped when the device does not advertise RTE_COMP_FF_HUFFMAN_FIXED.
 * Runs each test buffer in both directions: compressdev-compress /
 * zlib-decompress, then zlib-compress / compressdev-decompress.
 *
 * NOTE(review): elided excerpt — return/exit paths and some initializer
 * fields are not visible here.
 */
2169 test_compressdev_deflate_stateless_fixed(void)
2171 struct comp_testsuite_params *ts_params = &testsuite_params;
2174 const struct rte_compressdev_capabilities *capab;
2176 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2177 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2179 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2182 struct rte_comp_xform *compress_xform =
2183 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2185 if (compress_xform == NULL) {
2187 "Compress xform could not be created\n");
/* Clone the default xform and force fixed Huffman coding. */
2192 memcpy(compress_xform, ts_params->def_comp_xform,
2193 sizeof(struct rte_comp_xform));
2194 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
2196 struct interim_data_params int_data = {
2201 &ts_params->def_decomp_xform,
2205 struct test_data_params test_data = {
2206 .compress_state = RTE_COMP_OP_STATELESS,
2207 .decompress_state = RTE_COMP_OP_STATELESS,
2208 .buff_type = LB_BOTH,
2209 .zlib_dir = ZLIB_DECOMPRESS,
2212 .overflow = OVERFLOW_DISABLED,
2213 .ratio = RATIO_ENABLED
2216 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2217 int_data.test_bufs = &compress_test_bufs[i];
2218 int_data.buf_idx = &i;
2220 /* Compress with compressdev, decompress with Zlib */
2221 test_data.zlib_dir = ZLIB_DECOMPRESS;
2222 ret = test_deflate_comp_decomp(&int_data, &test_data);
2226 /* Compress with Zlib, decompress with compressdev */
2227 test_data.zlib_dir = ZLIB_COMPRESS;
2228 ret = test_deflate_comp_decomp(&int_data, &test_data);
2236 rte_free(compress_xform);
/*
 * Test case: stateless compression with dynamic Huffman coding.
 *
 * Skipped when the device does not advertise RTE_COMP_FF_HUFFMAN_DYNAMIC.
 * Mirrors the fixed-Huffman test: each buffer is exercised in both
 * compressdev/zlib directions.
 *
 * NOTE(review): compress_xform is allocated before the capability check;
 * whether the skip path frees it is not visible in this elided excerpt —
 * confirm no leak on skip.
 */
2241 test_compressdev_deflate_stateless_dynamic(void)
2243 struct comp_testsuite_params *ts_params = &testsuite_params;
2246 struct rte_comp_xform *compress_xform =
2247 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2249 const struct rte_compressdev_capabilities *capab;
2251 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2252 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2254 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2257 if (compress_xform == NULL) {
2259 "Compress xform could not be created\n");
/* Clone the default xform and force dynamic Huffman coding. */
2264 memcpy(compress_xform, ts_params->def_comp_xform,
2265 sizeof(struct rte_comp_xform));
2266 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
2268 struct interim_data_params int_data = {
2273 &ts_params->def_decomp_xform,
2277 struct test_data_params test_data = {
2278 .compress_state = RTE_COMP_OP_STATELESS,
2279 .decompress_state = RTE_COMP_OP_STATELESS,
2280 .buff_type = LB_BOTH,
2281 .zlib_dir = ZLIB_DECOMPRESS,
2284 .overflow = OVERFLOW_DISABLED,
2285 .ratio = RATIO_ENABLED
2288 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2289 int_data.test_bufs = &compress_test_bufs[i];
2290 int_data.buf_idx = &i;
2292 /* Compress with compressdev, decompress with Zlib */
2293 test_data.zlib_dir = ZLIB_DECOMPRESS;
2294 ret = test_deflate_comp_decomp(&int_data, &test_data);
2298 /* Compress with Zlib, decompress with compressdev */
2299 test_data.zlib_dir = ZLIB_COMPRESS;
2300 ret = test_deflate_comp_decomp(&int_data, &test_data);
2308 rte_free(compress_xform);
/*
 * Test case: stateless operation on all test buffers in a single batch.
 *
 * Submits every entry of compress_test_bufs[] as one multi-op run, in
 * both compressdev/zlib directions, using the default xforms.
 *
 * NOTE(review): elided excerpt — the buf_idx initialization inside the
 * first loop and the error-return branches are not visible here.
 */
2313 test_compressdev_deflate_stateless_multi_op(void)
2315 struct comp_testsuite_params *ts_params = &testsuite_params;
2316 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
2317 uint16_t buf_idx[num_bufs];
2321 for (i = 0; i < num_bufs; i++)
2324 struct interim_data_params int_data = {
2328 &ts_params->def_comp_xform,
2329 &ts_params->def_decomp_xform,
2333 struct test_data_params test_data = {
2334 .compress_state = RTE_COMP_OP_STATELESS,
2335 .decompress_state = RTE_COMP_OP_STATELESS,
2336 .buff_type = LB_BOTH,
2337 .zlib_dir = ZLIB_DECOMPRESS,
2340 .overflow = OVERFLOW_DISABLED,
2341 .ratio = RATIO_ENABLED
2344 /* Compress with compressdev, decompress with Zlib */
2345 test_data.zlib_dir = ZLIB_DECOMPRESS;
2346 ret = test_deflate_comp_decomp(&int_data, &test_data);
2350 /* Compress with Zlib, decompress with compressdev */
2351 test_data.zlib_dir = ZLIB_COMPRESS;
2352 ret = test_deflate_comp_decomp(&int_data, &test_data);
2356 return TEST_SUCCESS;
/*
 * Test case: stateless compression across every compression level.
 *
 * For each test buffer, iterates the compress level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX and runs the
 * compressdev-compress / zlib-decompress pipeline at each level.
 *
 * NOTE(review): elided excerpt — the level increment in the inner for
 * header and the error/exit paths are not visible here.
 */
2360 test_compressdev_deflate_stateless_multi_level(void)
2362 struct comp_testsuite_params *ts_params = &testsuite_params;
2366 struct rte_comp_xform *compress_xform =
2367 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2369 if (compress_xform == NULL) {
2371 "Compress xform could not be created\n");
/* Start from the default xform; only the level is varied per run. */
2376 memcpy(compress_xform, ts_params->def_comp_xform,
2377 sizeof(struct rte_comp_xform));
2379 struct interim_data_params int_data = {
2384 &ts_params->def_decomp_xform,
2388 struct test_data_params test_data = {
2389 .compress_state = RTE_COMP_OP_STATELESS,
2390 .decompress_state = RTE_COMP_OP_STATELESS,
2391 .buff_type = LB_BOTH,
2392 .zlib_dir = ZLIB_DECOMPRESS,
2395 .overflow = OVERFLOW_DISABLED,
2396 .ratio = RATIO_ENABLED
2399 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2400 int_data.test_bufs = &compress_test_bufs[i];
2401 int_data.buf_idx = &i;
2403 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
2405 compress_xform->compress.level = level;
2406 /* Compress with compressdev, decompress with Zlib */
2407 test_data.zlib_dir = ZLIB_DECOMPRESS;
2408 ret = test_deflate_comp_decomp(&int_data, &test_data);
2417 rte_free(compress_xform);
2421 #define NUM_XFORMS 3
/*
 * Test case: one batch using several different xforms (varying levels).
 *
 * Creates NUM_XFORMS compress/decompress xform pairs, each a clone of the
 * defaults with a different compression level (starting at
 * RTE_COMP_LEVEL_MIN; the per-iteration level change is not visible in
 * this elided excerpt — TODO confirm), runs all ops against the same
 * input buffer, then frees every xform.
 */
2423 test_compressdev_deflate_stateless_multi_xform(void)
2425 struct comp_testsuite_params *ts_params = &testsuite_params;
2426 uint16_t num_bufs = NUM_XFORMS;
2427 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
2428 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
2429 const char *test_buffers[NUM_XFORMS];
2431 unsigned int level = RTE_COMP_LEVEL_MIN;
2432 uint16_t buf_idx[num_bufs];
2435 /* Create multiple xforms with various levels */
2436 for (i = 0; i < NUM_XFORMS; i++) {
2437 compress_xforms[i] = rte_malloc(NULL,
2438 sizeof(struct rte_comp_xform), 0);
2439 if (compress_xforms[i] == NULL) {
2441 "Compress xform could not be created\n");
2446 memcpy(compress_xforms[i], ts_params->def_comp_xform,
2447 sizeof(struct rte_comp_xform));
2448 compress_xforms[i]->compress.level = level;
2451 decompress_xforms[i] = rte_malloc(NULL,
2452 sizeof(struct rte_comp_xform), 0);
2453 if (decompress_xforms[i] == NULL) {
2455 "Decompress xform could not be created\n");
2460 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
2461 sizeof(struct rte_comp_xform));
2464 for (i = 0; i < NUM_XFORMS; i++) {
2466 /* Use the same buffer in all sessions */
2467 test_buffers[i] = compress_test_bufs[0];
2470 struct interim_data_params int_data = {
2479 struct test_data_params test_data = {
2480 .compress_state = RTE_COMP_OP_STATELESS,
2481 .decompress_state = RTE_COMP_OP_STATELESS,
2482 .buff_type = LB_BOTH,
2483 .zlib_dir = ZLIB_DECOMPRESS,
2486 .overflow = OVERFLOW_DISABLED,
2487 .ratio = RATIO_ENABLED
2490 /* Compress with compressdev, decompress with Zlib */
2491 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Cleanup: rte_free(NULL) is a no-op, so partially-created arrays
 * are safe to free unconditionally. */
2498 for (i = 0; i < NUM_XFORMS; i++) {
2499 rte_free(compress_xforms[i]);
2500 rte_free(decompress_xforms[i]);
/*
 * Test case: stateless operation with scatter-gather (chained) mbufs.
 *
 * Requires RTE_COMP_FF_OOP_SGL_IN_SGL_OUT; runs every test buffer with
 * SGL input and output in both compressdev/zlib directions, then, when
 * the device also supports the mixed layouts, repeats with SGL-in /
 * linear-out (SGL_TO_LB) and linear-in / SGL-out (LB_TO_SGL).
 *
 * NOTE(review): elided excerpt — error-return branches and some
 * initializer fields are not visible here.
 */
2507 test_compressdev_deflate_stateless_sgl(void)
2509 struct comp_testsuite_params *ts_params = &testsuite_params;
2512 const struct rte_compressdev_capabilities *capab;
2514 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2515 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2517 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2520 struct interim_data_params int_data = {
2524 &ts_params->def_comp_xform,
2525 &ts_params->def_decomp_xform,
2529 struct test_data_params test_data = {
2530 .compress_state = RTE_COMP_OP_STATELESS,
2531 .decompress_state = RTE_COMP_OP_STATELESS,
2532 .buff_type = SGL_BOTH,
2533 .zlib_dir = ZLIB_DECOMPRESS,
2536 .overflow = OVERFLOW_DISABLED,
2537 .ratio = RATIO_ENABLED
2540 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2541 int_data.test_bufs = &compress_test_bufs[i];
2542 int_data.buf_idx = &i;
2544 /* Compress with compressdev, decompress with Zlib */
2545 test_data.zlib_dir = ZLIB_DECOMPRESS;
2546 ret = test_deflate_comp_decomp(&int_data, &test_data);
2550 /* Compress with Zlib, decompress with compressdev */
2551 test_data.zlib_dir = ZLIB_COMPRESS;
2552 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Optional mixed layout: SGL input, linear (contiguous) output. */
2556 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
2557 /* Compress with compressdev, decompress with Zlib */
2558 test_data.zlib_dir = ZLIB_DECOMPRESS;
2559 test_data.buff_type = SGL_TO_LB;
2560 ret = test_deflate_comp_decomp(&int_data, &test_data);
2564 /* Compress with Zlib, decompress with compressdev */
2565 test_data.zlib_dir = ZLIB_COMPRESS;
2566 test_data.buff_type = SGL_TO_LB;
2567 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Optional mixed layout: linear input, SGL output. */
2572 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
2573 /* Compress with compressdev, decompress with Zlib */
2574 test_data.zlib_dir = ZLIB_DECOMPRESS;
2575 test_data.buff_type = LB_TO_SGL;
2576 ret = test_deflate_comp_decomp(&int_data, &test_data);
2580 /* Compress with Zlib, decompress with compressdev */
2581 test_data.zlib_dir = ZLIB_COMPRESS;
2582 test_data.buff_type = LB_TO_SGL;
2583 ret = test_deflate_comp_decomp(&int_data, &test_data);
2589 return TEST_SUCCESS;
/*
 * Stateless deflate test exercising checksum generation/verification.
 * Skipped unless device 0 reports at least one of CRC32, Adler32 or
 * combined CRC32+Adler32 checksum support; each supported checksum type
 * is run over every buffer in compress_test_bufs[].
 * NOTE(review): several original lines are elided in this view; control
 * flow between the visible statements is assumed, not shown.
 */
2593 test_compressdev_deflate_stateless_checksum(void)
2595 struct comp_testsuite_params *ts_params = &testsuite_params;
2598 const struct rte_compressdev_capabilities *capab;
2600 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2601 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2603 /* Check if driver supports any checksum */
2604 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
2605 (capab->comp_feature_flags &
2606 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
2607 (capab->comp_feature_flags &
2608 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
/* Private copies of the default xforms so .chksum can be changed per run. */
2611 struct rte_comp_xform *compress_xform =
2612 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2613 if (compress_xform == NULL) {
2614 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2618 memcpy(compress_xform, ts_params->def_comp_xform,
2619 sizeof(struct rte_comp_xform));
2621 struct rte_comp_xform *decompress_xform =
2622 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2623 if (decompress_xform == NULL) {
2624 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2625 rte_free(compress_xform);
2629 memcpy(decompress_xform, ts_params->def_decomp_xform,
2630 sizeof(struct rte_comp_xform));
2632 struct interim_data_params int_data = {
2641 struct test_data_params test_data = {
2642 .compress_state = RTE_COMP_OP_STATELESS,
2643 .decompress_state = RTE_COMP_OP_STATELESS,
2644 .buff_type = LB_BOTH,
2645 .zlib_dir = ZLIB_DECOMPRESS,
2648 .overflow = OVERFLOW_DISABLED,
2649 .ratio = RATIO_ENABLED
2652 /* Check if driver supports crc32 checksum and test */
2653 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
2654 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2655 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2657 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2658 /* Compress with compressdev, decompress with Zlib */
2659 int_data.test_bufs = &compress_test_bufs[i];
2660 int_data.buf_idx = &i;
2662 /* Generate zlib checksum and test against selected
2663 * drivers decompression checksum
2665 test_data.zlib_dir = ZLIB_COMPRESS;
2666 ret = test_deflate_comp_decomp(&int_data, &test_data);
2670 /* Generate compression and decompression
2671 * checksum of selected driver
2673 test_data.zlib_dir = ZLIB_NONE;
2674 ret = test_deflate_comp_decomp(&int_data, &test_data);
2680 /* Check if driver supports adler32 checksum and test */
2681 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
2682 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2683 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2685 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2686 int_data.test_bufs = &compress_test_bufs[i];
2687 int_data.buf_idx = &i;
2689 /* Generate zlib checksum and test against selected
2690 * drivers decompression checksum
2692 test_data.zlib_dir = ZLIB_COMPRESS;
2693 ret = test_deflate_comp_decomp(&int_data, &test_data);
2696 /* Generate compression and decompression
2697 * checksum of selected driver
2699 test_data.zlib_dir = ZLIB_NONE;
2700 ret = test_deflate_comp_decomp(&int_data, &test_data);
2706 /* Check if driver supports combined crc and adler checksum and test */
2707 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
2708 compress_xform->compress.chksum =
2709 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2710 decompress_xform->decompress.chksum =
2711 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2713 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2714 int_data.test_bufs = &compress_test_bufs[i];
2715 int_data.buf_idx = &i;
2717 /* Generate compression and decompression
2718 * checksum of selected driver
/* Zlib cannot produce the combined checksum, so driver-only path here. */
2720 test_data.zlib_dir = ZLIB_NONE;
2721 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Both private xforms are freed on every exit path visible here. */
2730 rte_free(compress_xform);
2731 rte_free(decompress_xform);
/*
 * Negative test: compression/decompression into an undersized destination
 * buffer (test_data.out_of_space = 1) must be reported as an error by the
 * driver, both for LB and (when supported) SGL buffer layouts.
 * Requires fixed-Huffman support; errors in the log are expected.
 */
2736 test_compressdev_out_of_space_buffer(void)
2738 struct comp_testsuite_params *ts_params = &testsuite_params;
2741 const struct rte_compressdev_capabilities *capab;
2743 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
2745 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2746 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2748 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2751 struct interim_data_params int_data = {
2752 &compress_test_bufs[0],
2755 &ts_params->def_comp_xform,
2756 &ts_params->def_decomp_xform,
2760 struct test_data_params test_data = {
2761 .compress_state = RTE_COMP_OP_STATELESS,
2762 .decompress_state = RTE_COMP_OP_STATELESS,
2763 .buff_type = LB_BOTH,
2764 .zlib_dir = ZLIB_DECOMPRESS,
2765 .out_of_space = 1, /* run out-of-space test */
2767 .overflow = OVERFLOW_DISABLED,
2768 .ratio = RATIO_ENABLED
2770 /* Compress with compressdev, decompress with Zlib */
2771 test_data.zlib_dir = ZLIB_DECOMPRESS;
2772 ret = test_deflate_comp_decomp(&int_data, &test_data);
2776 /* Compress with Zlib, decompress with compressdev */
2777 test_data.zlib_dir = ZLIB_COMPRESS;
2778 ret = test_deflate_comp_decomp(&int_data, &test_data);
2782 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2783 /* Compress with compressdev, decompress with Zlib */
2784 test_data.zlib_dir = ZLIB_DECOMPRESS;
2785 test_data.buff_type = SGL_BOTH;
2786 ret = test_deflate_comp_decomp(&int_data, &test_data);
2790 /* Compress with Zlib, decompress with compressdev */
2791 test_data.zlib_dir = ZLIB_COMPRESS;
2792 test_data.buff_type = SGL_BOTH;
2793 ret = test_deflate_comp_decomp(&int_data, &test_data);
/*
 * Stateless dynamic-Huffman test on a BIG_DATA_TEST_SIZE buffer of
 * pseudo-random data using SGL buffers in both directions.
 * Requires dynamic Huffman and OOP SGL-in/SGL-out; ratio check disabled
 * because random data may not compress.
 */
2805 test_compressdev_deflate_stateless_dynamic_big(void)
2807 struct comp_testsuite_params *ts_params = &testsuite_params;
2811 const struct rte_compressdev_capabilities *capab;
2812 char *test_buffer = NULL;
2814 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2815 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2817 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2820 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2823 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
2824 if (test_buffer == NULL) {
2826 "Can't allocate buffer for big-data\n");
2830 struct interim_data_params int_data = {
2831 (const char * const *)&test_buffer,
2834 &ts_params->def_comp_xform,
2835 &ts_params->def_decomp_xform,
2839 struct test_data_params test_data = {
2840 .compress_state = RTE_COMP_OP_STATELESS,
2841 .decompress_state = RTE_COMP_OP_STATELESS,
2842 .buff_type = SGL_BOTH,
2843 .zlib_dir = ZLIB_DECOMPRESS,
2846 .overflow = OVERFLOW_DISABLED,
2847 .ratio = RATIO_DISABLED
2850 ts_params->def_comp_xform->compress.deflate.huffman =
2851 RTE_COMP_HUFFMAN_DYNAMIC;
2853 /* fill the buffer with data based on rand. data */
/* Fixed srand seed keeps the pattern reproducible; each byte is ORed
 * with 1 so none is 0, then the buffer is NUL-terminated explicitly.
 */
2854 srand(BIG_DATA_TEST_SIZE);
2855 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2856 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2857 test_buffer[BIG_DATA_TEST_SIZE - 1] = 0;
2859 /* Compress with compressdev, decompress with Zlib */
2860 test_data.zlib_dir = ZLIB_DECOMPRESS;
2861 ret = test_deflate_comp_decomp(&int_data, &test_data);
2865 /* Compress with Zlib, decompress with compressdev */
2866 test_data.zlib_dir = ZLIB_COMPRESS;
2867 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Restore the shared default xform before returning. */
2874 ts_params->def_comp_xform->compress.deflate.huffman =
2875 RTE_COMP_HUFFMAN_DEFAULT;
2876 rte_free(test_buffer);
/*
 * Stateful decompression test: data is compressed with Zlib, then
 * decompressed by the device in up to 4 steps of 2000-byte output
 * blocks. Repeats with SGL buffers when the device supports them.
 * Skipped without RTE_COMP_FF_STATEFUL_DECOMPRESSION.
 */
2881 test_compressdev_deflate_stateful_decomp(void)
2883 struct comp_testsuite_params *ts_params = &testsuite_params;
2886 const struct rte_compressdev_capabilities *capab;
2888 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2889 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2891 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2894 struct interim_data_params int_data = {
2895 &compress_test_bufs[0],
2898 &ts_params->def_comp_xform,
2899 &ts_params->def_decomp_xform,
2903 struct test_data_params test_data = {
2904 .compress_state = RTE_COMP_OP_STATELESS,
2905 .decompress_state = RTE_COMP_OP_STATEFUL,
2906 .buff_type = LB_BOTH,
2907 .zlib_dir = ZLIB_COMPRESS,
2910 .decompress_output_block_size = 2000,
2911 .decompress_steps_max = 4,
2912 .overflow = OVERFLOW_DISABLED,
2913 .ratio = RATIO_ENABLED
2916 /* Compress with Zlib, decompress with compressdev */
2917 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2922 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2923 /* Now test with SGL buffers */
2924 test_data.buff_type = SGL_BOTH;
2925 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/*
 * Stateful decompression with checksum verification: for each checksum
 * type the device supports (CRC32, Adler32, combined), decompress in
 * 2000-byte stateful steps, with LB buffers and again with SGL buffers
 * where supported. The combined checksum run skips Zlib entirely since
 * Zlib cannot produce CRC32+Adler32.
 */
2938 test_compressdev_deflate_stateful_decomp_checksum(void)
2940 struct comp_testsuite_params *ts_params = &testsuite_params;
2943 const struct rte_compressdev_capabilities *capab;
2945 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2946 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2948 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2951 /* Check if driver supports any checksum */
2952 if (!(capab->comp_feature_flags &
2953 (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
2954 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
/* Private copies of the default xforms so .chksum can be changed per run. */
2957 struct rte_comp_xform *compress_xform =
2958 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2959 if (compress_xform == NULL) {
2960 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2964 memcpy(compress_xform, ts_params->def_comp_xform,
2965 sizeof(struct rte_comp_xform));
2967 struct rte_comp_xform *decompress_xform =
2968 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2969 if (decompress_xform == NULL) {
2970 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2971 rte_free(compress_xform);
2975 memcpy(decompress_xform, ts_params->def_decomp_xform,
2976 sizeof(struct rte_comp_xform));
2978 struct interim_data_params int_data = {
2979 &compress_test_bufs[0],
2987 struct test_data_params test_data = {
2988 .compress_state = RTE_COMP_OP_STATELESS,
2989 .decompress_state = RTE_COMP_OP_STATEFUL,
2990 .buff_type = LB_BOTH,
2991 .zlib_dir = ZLIB_COMPRESS,
2994 .decompress_output_block_size = 2000,
2995 .decompress_steps_max = 4,
2996 .overflow = OVERFLOW_DISABLED,
2997 .ratio = RATIO_ENABLED
3000 /* Check if driver supports crc32 checksum and test */
3001 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
3002 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
3003 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
3004 /* Compress with Zlib, decompress with compressdev */
3005 test_data.buff_type = LB_BOTH;
3006 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3010 if (capab->comp_feature_flags &
3011 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
3012 /* Now test with SGL buffers */
3013 test_data.buff_type = SGL_BOTH;
3014 if (test_deflate_comp_decomp(&int_data,
3022 /* Check if driver supports adler32 checksum and test */
3023 if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
3024 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
3025 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
3026 /* Compress with Zlib, decompress with compressdev */
3027 test_data.buff_type = LB_BOTH;
3028 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3032 if (capab->comp_feature_flags &
3033 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
3034 /* Now test with SGL buffers */
3035 test_data.buff_type = SGL_BOTH;
3036 if (test_deflate_comp_decomp(&int_data,
3044 /* Check if driver supports combined crc and adler checksum and test */
3045 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
3046 compress_xform->compress.chksum =
3047 RTE_COMP_CHECKSUM_CRC32_ADLER32;
3048 decompress_xform->decompress.chksum =
3049 RTE_COMP_CHECKSUM_CRC32_ADLER32;
3050 /* Zlib doesn't support combined checksum */
3051 test_data.zlib_dir = ZLIB_NONE;
3052 /* Compress stateless, decompress stateful with compressdev */
3053 test_data.buff_type = LB_BOTH;
3054 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3058 if (capab->comp_feature_flags &
3059 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
3060 /* Now test with SGL buffers */
3061 test_data.buff_type = SGL_BOTH;
3062 if (test_deflate_comp_decomp(&int_data,
/* Both private xforms are freed on every exit path visible here. */
3073 rte_free(compress_xform);
3074 rte_free(decompress_xform);
/*
 * Look up or create an IOVA-contiguous memzone named "<name>_<socket>".
 * An existing zone of a different length is freed and re-reserved so the
 * caller always gets a zone of exactly `size` bytes.
 * NOTE(review): after rte_memzone_free() the local `memzone` pointer is
 * presumably reset before the NULL check on the next visible line —
 * the intervening original line is elided here; confirm in full source.
 */
3078 static const struct rte_memzone *
3079 make_memzone(const char *name, size_t size)
3081 unsigned int socket_id = rte_socket_id();
3082 char mz_name[RTE_MEMZONE_NAMESIZE];
3083 const struct rte_memzone *memzone;
3085 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id);
3086 memzone = rte_memzone_lookup(mz_name);
3087 if (memzone != NULL && memzone->len != size) {
3088 rte_memzone_free(memzone);
3091 if (memzone == NULL) {
3092 memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id,
3093 RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
3094 if (memzone == NULL)
3095 RTE_LOG(ERR, USER1, "Can't allocate memory zone %s",
/*
 * Exercise compress/decompress with externally attached mbuf memory:
 * input, compressed and decompressed data live in memzones (sized for
 * the largest test string) rather than mempool-backed mbuf data rooms.
 * Runs every compress_test_bufs[] entry in both directions, then frees
 * all three memzones.
 */
3102 test_compressdev_external_mbufs(void)
3104 struct comp_testsuite_params *ts_params = &testsuite_params;
3105 size_t data_len = 0;
3107 int ret = TEST_FAILED;
/* data_len = longest test string incl. NUL, used to size all memzones */
3109 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
3110 data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1);
3112 struct interim_data_params int_data = {
3116 &ts_params->def_comp_xform,
3117 &ts_params->def_decomp_xform,
3121 struct test_data_params test_data = {
3122 .compress_state = RTE_COMP_OP_STATELESS,
3123 .decompress_state = RTE_COMP_OP_STATELESS,
3124 .buff_type = LB_BOTH,
3125 .zlib_dir = ZLIB_DECOMPRESS,
3128 .use_external_mbufs = 1,
3129 .inbuf_data_size = data_len,
3130 .inbuf_memzone = make_memzone("inbuf", data_len),
3131 .compbuf_memzone = make_memzone("compbuf", data_len *
3132 COMPRESS_BUF_SIZE_RATIO),
3133 .uncompbuf_memzone = make_memzone("decompbuf", data_len),
3134 .overflow = OVERFLOW_DISABLED
3137 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
3138 /* prepare input data */
3139 data_len = strlen(compress_test_bufs[i]) + 1;
3140 rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i],
3142 test_data.inbuf_data_size = data_len;
3143 int_data.buf_idx = &i;
3145 /* Compress with compressdev, decompress with Zlib */
3146 test_data.zlib_dir = ZLIB_DECOMPRESS;
3147 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
3150 /* Compress with Zlib, decompress with compressdev */
3151 test_data.zlib_dir = ZLIB_COMPRESS;
3152 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
3159 rte_memzone_free(test_data.inbuf_memzone);
3160 rte_memzone_free(test_data.compbuf_memzone);
3161 rte_memzone_free(test_data.uncompbuf_memzone);
/*
 * Fixed-Huffman stateless test that treats an out-of-space result as
 * recoverable: a negative return from test_deflate_comp_decomp() is a
 * failure, while a positive return (visible in the comp_result > 0
 * branches) is tolerated. Uses a private compress xform forced to
 * RTE_COMP_HUFFMAN_FIXED; freed before returning.
 */
3166 test_compressdev_deflate_stateless_fixed_oos_recoverable(void)
3168 struct comp_testsuite_params *ts_params = &testsuite_params;
3172 const struct rte_compressdev_capabilities *capab;
3174 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3175 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3177 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
3180 struct rte_comp_xform *compress_xform =
3181 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
3183 if (compress_xform == NULL) {
3185 "Compress xform could not be created\n");
3190 memcpy(compress_xform, ts_params->def_comp_xform,
3191 sizeof(struct rte_comp_xform));
3192 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
3194 struct interim_data_params int_data = {
3199 &ts_params->def_decomp_xform,
3203 struct test_data_params test_data = {
3204 .compress_state = RTE_COMP_OP_STATELESS,
3205 .decompress_state = RTE_COMP_OP_STATELESS,
3206 .buff_type = LB_BOTH,
3207 .zlib_dir = ZLIB_DECOMPRESS,
3210 .overflow = OVERFLOW_ENABLED,
3211 .ratio = RATIO_ENABLED
3214 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
3215 int_data.test_bufs = &compress_test_bufs[i];
3216 int_data.buf_idx = &i;
3218 /* Compress with compressdev, decompress with Zlib */
3219 test_data.zlib_dir = ZLIB_DECOMPRESS;
3220 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
3221 if (comp_result < 0) {
3224 } else if (comp_result > 0) {
3229 /* Compress with Zlib, decompress with compressdev */
3230 test_data.zlib_dir = ZLIB_COMPRESS;
3231 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
3232 if (comp_result < 0) {
3235 } else if (comp_result > 0) {
3244 rte_free(compress_xform);
/*
 * Intermediate-buffer ("im buffer") test: one stateless op on a linear
 * buffer of IM_BUF_DATA_TEST_SIZE_LB pseudo-random bytes, compressed and
 * decompressed entirely by the device (ZLIB_NONE). Dynamic Huffman is
 * forced on the shared default xform and restored before returning.
 * NOTE(review): per the comment below, LB input at its maximum may make
 * the driver attach an extra mbuf when ratio 1.3 applies.
 */
3249 test_compressdev_deflate_im_buffers_LB_1op(void)
3251 struct comp_testsuite_params *ts_params = &testsuite_params;
3253 int ret = TEST_SUCCESS;
3255 const struct rte_compressdev_capabilities *capab;
3256 char *test_buffer = NULL;
3258 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3259 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3261 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3264 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3267 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3268 if (test_buffer == NULL) {
3270 "Can't allocate buffer for 'im buffer' test\n");
3274 struct interim_data_params int_data = {
3275 (const char * const *)&test_buffer,
3278 &ts_params->def_comp_xform,
3279 &ts_params->def_decomp_xform,
3283 struct test_data_params test_data = {
3284 .compress_state = RTE_COMP_OP_STATELESS,
3285 .decompress_state = RTE_COMP_OP_STATELESS,
3286 /* must be LB to SGL,
3287 * input LB buffer reaches its maximum,
3288 * if ratio 1.3 than another mbuf must be
3289 * created and attached
3291 .buff_type = LB_BOTH,
3292 .zlib_dir = ZLIB_NONE,
3295 .overflow = OVERFLOW_DISABLED,
3296 .ratio = RATIO_DISABLED
3299 ts_params->def_comp_xform->compress.deflate.huffman =
3300 RTE_COMP_HUFFMAN_DYNAMIC;
3302 /* fill the buffer with data based on rand. data */
/* Fixed seed for reproducibility; | 1 guarantees no zero bytes. */
3303 srand(IM_BUF_DATA_TEST_SIZE_LB);
3304 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3305 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3307 /* Compress with compressdev, decompress with compressdev */
3308 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3314 ts_params->def_comp_xform->compress.deflate.huffman =
3315 RTE_COMP_HUFFMAN_DEFAULT;
3316 rte_free(test_buffer);
/*
 * "im buffer" test with two LB ops where the LARGE random buffer is the
 * FIRST op and a small test string is the second. Device-only
 * compress+decompress (ZLIB_NONE); dynamic Huffman forced then restored.
 */
3321 test_compressdev_deflate_im_buffers_LB_2ops_first(void)
3323 struct comp_testsuite_params *ts_params = &testsuite_params;
3325 int ret = TEST_SUCCESS;
3327 const struct rte_compressdev_capabilities *capab;
3328 char *test_buffer = NULL;
3329 const char *test_buffers[2];
3331 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3332 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3334 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3337 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3340 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3341 if (test_buffer == NULL) {
3343 "Can't allocate buffer for 'im buffer' test\n");
3347 test_buffers[0] = test_buffer;
3348 test_buffers[1] = compress_test_bufs[0];
3350 struct interim_data_params int_data = {
3351 (const char * const *)test_buffers,
3354 &ts_params->def_comp_xform,
3355 &ts_params->def_decomp_xform,
3359 struct test_data_params test_data = {
3360 .compress_state = RTE_COMP_OP_STATELESS,
3361 .decompress_state = RTE_COMP_OP_STATELESS,
3362 .buff_type = LB_BOTH,
3363 .zlib_dir = ZLIB_NONE,
3366 .overflow = OVERFLOW_DISABLED,
3367 .ratio = RATIO_DISABLED
3370 ts_params->def_comp_xform->compress.deflate.huffman =
3371 RTE_COMP_HUFFMAN_DYNAMIC;
3373 /* fill the buffer with data based on rand. data */
3374 srand(IM_BUF_DATA_TEST_SIZE_LB);
3375 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3376 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3378 /* Compress with compressdev, decompress with compressdev */
3379 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3385 ts_params->def_comp_xform->compress.deflate.huffman =
3386 RTE_COMP_HUFFMAN_DEFAULT;
3387 rte_free(test_buffer);
/*
 * "im buffer" test with two LB ops where the large random buffer is the
 * SECOND op (small test string first). Otherwise identical to the
 * _2ops_first variant: device-only, dynamic Huffman forced then restored.
 */
3392 test_compressdev_deflate_im_buffers_LB_2ops_second(void)
3394 struct comp_testsuite_params *ts_params = &testsuite_params;
3396 int ret = TEST_SUCCESS;
3398 const struct rte_compressdev_capabilities *capab;
3399 char *test_buffer = NULL;
3400 const char *test_buffers[2];
3402 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3403 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3405 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3408 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3411 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3412 if (test_buffer == NULL) {
3414 "Can't allocate buffer for 'im buffer' test\n");
3418 test_buffers[0] = compress_test_bufs[0];
3419 test_buffers[1] = test_buffer;
3421 struct interim_data_params int_data = {
3422 (const char * const *)test_buffers,
3425 &ts_params->def_comp_xform,
3426 &ts_params->def_decomp_xform,
3430 struct test_data_params test_data = {
3431 .compress_state = RTE_COMP_OP_STATELESS,
3432 .decompress_state = RTE_COMP_OP_STATELESS,
3433 .buff_type = LB_BOTH,
3434 .zlib_dir = ZLIB_NONE,
3437 .overflow = OVERFLOW_DISABLED,
3438 .ratio = RATIO_DISABLED
3441 ts_params->def_comp_xform->compress.deflate.huffman =
3442 RTE_COMP_HUFFMAN_DYNAMIC;
3444 /* fill the buffer with data based on rand. data */
3445 srand(IM_BUF_DATA_TEST_SIZE_LB);
3446 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3447 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3449 /* Compress with compressdev, decompress with compressdev */
3450 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3456 ts_params->def_comp_xform->compress.deflate.huffman =
3457 RTE_COMP_HUFFMAN_DEFAULT;
3458 rte_free(test_buffer);
/*
 * "im buffer" test with three LB ops: small string, large random
 * buffer, small string. Device-only compress+decompress (ZLIB_NONE);
 * dynamic Huffman forced on the shared xform and restored on exit.
 */
3463 test_compressdev_deflate_im_buffers_LB_3ops(void)
3465 struct comp_testsuite_params *ts_params = &testsuite_params;
3467 int ret = TEST_SUCCESS;
3469 const struct rte_compressdev_capabilities *capab;
3470 char *test_buffer = NULL;
3471 const char *test_buffers[3];
3473 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3474 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3476 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3479 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3482 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3483 if (test_buffer == NULL) {
3485 "Can't allocate buffer for 'im buffer' test\n");
3489 test_buffers[0] = compress_test_bufs[0];
3490 test_buffers[1] = test_buffer;
3491 test_buffers[2] = compress_test_bufs[1];
3493 struct interim_data_params int_data = {
3494 (const char * const *)test_buffers,
3497 &ts_params->def_comp_xform,
3498 &ts_params->def_decomp_xform,
3502 struct test_data_params test_data = {
3503 .compress_state = RTE_COMP_OP_STATELESS,
3504 .decompress_state = RTE_COMP_OP_STATELESS,
3505 .buff_type = LB_BOTH,
3506 .zlib_dir = ZLIB_NONE,
3509 .overflow = OVERFLOW_DISABLED,
3510 .ratio = RATIO_DISABLED
3513 ts_params->def_comp_xform->compress.deflate.huffman =
3514 RTE_COMP_HUFFMAN_DYNAMIC;
3516 /* fill the buffer with data based on rand. data */
3517 srand(IM_BUF_DATA_TEST_SIZE_LB);
3518 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3519 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3521 /* Compress with compressdev, decompress with compressdev */
3522 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3528 ts_params->def_comp_xform->compress.deflate.huffman =
3529 RTE_COMP_HUFFMAN_DEFAULT;
3530 rte_free(test_buffer);
/*
 * "im buffer" test with four LB ops; the large random buffer appears
 * twice (ops 1 and 3, zero-based), interleaved with small test strings.
 * Device-only compress+decompress; dynamic Huffman forced then restored.
 */
3535 test_compressdev_deflate_im_buffers_LB_4ops(void)
3537 struct comp_testsuite_params *ts_params = &testsuite_params;
3539 int ret = TEST_SUCCESS;
3541 const struct rte_compressdev_capabilities *capab;
3542 char *test_buffer = NULL;
3543 const char *test_buffers[4];
3545 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3546 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3548 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3551 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3554 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0);
3555 if (test_buffer == NULL) {
3557 "Can't allocate buffer for 'im buffer' test\n");
3561 test_buffers[0] = compress_test_bufs[0];
3562 test_buffers[1] = test_buffer;
3563 test_buffers[2] = compress_test_bufs[1];
3564 test_buffers[3] = test_buffer;
3566 struct interim_data_params int_data = {
3567 (const char * const *)test_buffers,
3570 &ts_params->def_comp_xform,
3571 &ts_params->def_decomp_xform,
3575 struct test_data_params test_data = {
3576 .compress_state = RTE_COMP_OP_STATELESS,
3577 .decompress_state = RTE_COMP_OP_STATELESS,
3578 .buff_type = LB_BOTH,
3579 .zlib_dir = ZLIB_NONE,
3582 .overflow = OVERFLOW_DISABLED,
3583 .ratio = RATIO_DISABLED
3586 ts_params->def_comp_xform->compress.deflate.huffman =
3587 RTE_COMP_HUFFMAN_DYNAMIC;
3589 /* fill the buffer with data based on rand. data */
3590 srand(IM_BUF_DATA_TEST_SIZE_LB);
3591 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j)
3592 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3594 /* Compress with compressdev, decompress with compressdev */
3595 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3601 ts_params->def_comp_xform->compress.deflate.huffman =
3602 RTE_COMP_HUFFMAN_DEFAULT;
3603 rte_free(test_buffer);
/*
 * "im buffer" test: one stateless op on an SGL buffer of
 * IM_BUF_DATA_TEST_SIZE_SGL pseudo-random bytes, compressed and
 * decompressed entirely by the device (ZLIB_NONE). Dynamic Huffman is
 * forced on the shared xform and restored before returning.
 */
3609 test_compressdev_deflate_im_buffers_SGL_1op(void)
3611 struct comp_testsuite_params *ts_params = &testsuite_params;
3613 int ret = TEST_SUCCESS;
3615 const struct rte_compressdev_capabilities *capab;
3616 char *test_buffer = NULL;
3618 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3619 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3621 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3624 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3627 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3628 if (test_buffer == NULL) {
3630 "Can't allocate buffer for big-data\n");
3634 struct interim_data_params int_data = {
3635 (const char * const *)&test_buffer,
3638 &ts_params->def_comp_xform,
3639 &ts_params->def_decomp_xform,
3643 struct test_data_params test_data = {
3644 .compress_state = RTE_COMP_OP_STATELESS,
3645 .decompress_state = RTE_COMP_OP_STATELESS,
3646 .buff_type = SGL_BOTH,
3647 .zlib_dir = ZLIB_NONE,
3650 .overflow = OVERFLOW_DISABLED,
3651 .ratio = RATIO_DISABLED
3654 ts_params->def_comp_xform->compress.deflate.huffman =
3655 RTE_COMP_HUFFMAN_DYNAMIC;
3657 /* fill the buffer with data based on rand. data */
3658 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3659 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3660 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3662 /* Compress with compressdev, decompress with compressdev */
3663 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3669 ts_params->def_comp_xform->compress.deflate.huffman =
3670 RTE_COMP_HUFFMAN_DEFAULT;
3671 rte_free(test_buffer);
/*
 * "im buffer" SGL test with two ops where the large random buffer is
 * the FIRST op and a small test string is the second. Device-only
 * compress+decompress; dynamic Huffman forced then restored.
 */
3676 test_compressdev_deflate_im_buffers_SGL_2ops_first(void)
3678 struct comp_testsuite_params *ts_params = &testsuite_params;
3680 int ret = TEST_SUCCESS;
3682 const struct rte_compressdev_capabilities *capab;
3683 char *test_buffer = NULL;
3684 const char *test_buffers[2];
3686 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3687 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3689 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3692 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3695 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3696 if (test_buffer == NULL) {
3698 "Can't allocate buffer for big-data\n");
3702 test_buffers[0] = test_buffer;
3703 test_buffers[1] = compress_test_bufs[0];
3705 struct interim_data_params int_data = {
3706 (const char * const *)test_buffers,
3709 &ts_params->def_comp_xform,
3710 &ts_params->def_decomp_xform,
3714 struct test_data_params test_data = {
3715 .compress_state = RTE_COMP_OP_STATELESS,
3716 .decompress_state = RTE_COMP_OP_STATELESS,
3717 .buff_type = SGL_BOTH,
3718 .zlib_dir = ZLIB_NONE,
3721 .overflow = OVERFLOW_DISABLED,
3722 .ratio = RATIO_DISABLED
3725 ts_params->def_comp_xform->compress.deflate.huffman =
3726 RTE_COMP_HUFFMAN_DYNAMIC;
3728 /* fill the buffer with data based on rand. data */
3729 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3730 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3731 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3733 /* Compress with compressdev, decompress with compressdev */
3734 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3740 ts_params->def_comp_xform->compress.deflate.huffman =
3741 RTE_COMP_HUFFMAN_DEFAULT;
3742 rte_free(test_buffer);
/*
 * "im buffer" SGL test with two ops where the large random buffer is
 * the SECOND op (small test string first). Otherwise identical to the
 * _2ops_first variant: device-only, dynamic Huffman forced then restored.
 */
3747 test_compressdev_deflate_im_buffers_SGL_2ops_second(void)
3749 struct comp_testsuite_params *ts_params = &testsuite_params;
3751 int ret = TEST_SUCCESS;
3753 const struct rte_compressdev_capabilities *capab;
3754 char *test_buffer = NULL;
3755 const char *test_buffers[2];
3757 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3758 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3760 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3763 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3766 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3767 if (test_buffer == NULL) {
3769 "Can't allocate buffer for big-data\n");
3773 test_buffers[0] = compress_test_bufs[0];
3774 test_buffers[1] = test_buffer;
3776 struct interim_data_params int_data = {
3777 (const char * const *)test_buffers,
3780 &ts_params->def_comp_xform,
3781 &ts_params->def_decomp_xform,
3785 struct test_data_params test_data = {
3786 .compress_state = RTE_COMP_OP_STATELESS,
3787 .decompress_state = RTE_COMP_OP_STATELESS,
3788 .buff_type = SGL_BOTH,
3789 .zlib_dir = ZLIB_NONE,
3792 .overflow = OVERFLOW_DISABLED,
3793 .ratio = RATIO_DISABLED
3796 ts_params->def_comp_xform->compress.deflate.huffman =
3797 RTE_COMP_HUFFMAN_DYNAMIC;
3799 /* fill the buffer with data based on rand. data */
3800 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3801 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3802 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3804 /* Compress with compressdev, decompress with compressdev */
3805 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3811 ts_params->def_comp_xform->compress.deflate.huffman =
3812 RTE_COMP_HUFFMAN_DEFAULT;
3813 rte_free(test_buffer);
/*
 * "im buffer" SGL test with three ops: small string, large random
 * buffer, small string. Device-only compress+decompress (ZLIB_NONE);
 * dynamic Huffman forced on the shared xform and restored on exit.
 */
3818 test_compressdev_deflate_im_buffers_SGL_3ops(void)
3820 struct comp_testsuite_params *ts_params = &testsuite_params;
3822 int ret = TEST_SUCCESS;
3824 const struct rte_compressdev_capabilities *capab;
3825 char *test_buffer = NULL;
3826 const char *test_buffers[3];
3828 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3829 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3831 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3834 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3837 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3838 if (test_buffer == NULL) {
3840 "Can't allocate buffer for big-data\n");
3844 test_buffers[0] = compress_test_bufs[0];
3845 test_buffers[1] = test_buffer;
3846 test_buffers[2] = compress_test_bufs[1];
3848 struct interim_data_params int_data = {
3849 (const char * const *)test_buffers,
3852 &ts_params->def_comp_xform,
3853 &ts_params->def_decomp_xform,
3857 struct test_data_params test_data = {
3858 .compress_state = RTE_COMP_OP_STATELESS,
3859 .decompress_state = RTE_COMP_OP_STATELESS,
3860 .buff_type = SGL_BOTH,
3861 .zlib_dir = ZLIB_NONE,
3864 .overflow = OVERFLOW_DISABLED,
3865 .ratio = RATIO_DISABLED
3868 ts_params->def_comp_xform->compress.deflate.huffman =
3869 RTE_COMP_HUFFMAN_DYNAMIC;
3871 /* fill the buffer with data based on rand. data */
3872 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3873 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3874 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3876 /* Compress with compressdev, decompress with compressdev */
3877 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3883 ts_params->def_comp_xform->compress.deflate.huffman =
3884 RTE_COMP_HUFFMAN_DEFAULT;
3885 rte_free(test_buffer);
/*
 * "im buffer" SGL test with four ops; the large random buffer appears
 * twice, interleaved with small test strings. Device-only
 * compress+decompress; dynamic Huffman forced then restored.
 */
3891 test_compressdev_deflate_im_buffers_SGL_4ops(void)
3893 struct comp_testsuite_params *ts_params = &testsuite_params;
3895 int ret = TEST_SUCCESS;
3897 const struct rte_compressdev_capabilities *capab;
3898 char *test_buffer = NULL;
3899 const char *test_buffers[4];
3901 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3902 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3904 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3907 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3910 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0);
3911 if (test_buffer == NULL) {
3913 "Can't allocate buffer for big-data\n");
3917 test_buffers[0] = compress_test_bufs[0];
3918 test_buffers[1] = test_buffer;
3919 test_buffers[2] = compress_test_bufs[1];
3920 test_buffers[3] = test_buffer;
3922 struct interim_data_params int_data = {
3923 (const char * const *)test_buffers,
3926 &ts_params->def_comp_xform,
3927 &ts_params->def_decomp_xform,
3931 struct test_data_params test_data = {
3932 .compress_state = RTE_COMP_OP_STATELESS,
3933 .decompress_state = RTE_COMP_OP_STATELESS,
3934 .buff_type = SGL_BOTH,
3935 .zlib_dir = ZLIB_NONE,
3938 .overflow = OVERFLOW_DISABLED,
3939 .ratio = RATIO_DISABLED
3942 ts_params->def_comp_xform->compress.deflate.huffman =
3943 RTE_COMP_HUFFMAN_DYNAMIC;
3945 /* fill the buffer with data based on rand. data */
3946 srand(IM_BUF_DATA_TEST_SIZE_SGL);
3947 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j)
3948 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
3950 /* Compress with compressdev, decompress with compressdev */
3951 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
3957 ts_params->def_comp_xform->compress.deflate.huffman =
3958 RTE_COMP_HUFFMAN_DEFAULT;
3959 rte_free(test_buffer);
3964 test_compressdev_deflate_im_buffers_SGL_over_1op(void)
3966 struct comp_testsuite_params *ts_params = &testsuite_params;
3968 int ret = TEST_SUCCESS;
3970 const struct rte_compressdev_capabilities *capab;
3971 char *test_buffer = NULL;
3973 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
3975 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3976 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3978 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
3981 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
3984 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0);
3985 if (test_buffer == NULL) {
3987 "Can't allocate buffer for big-data\n");
3991 struct interim_data_params int_data = {
3992 (const char * const *)&test_buffer,
3995 &ts_params->def_comp_xform,
3996 &ts_params->def_decomp_xform,
4000 struct test_data_params test_data = {
4001 .compress_state = RTE_COMP_OP_STATELESS,
4002 .decompress_state = RTE_COMP_OP_STATELESS,
4003 .buff_type = SGL_BOTH,
4004 .zlib_dir = ZLIB_NONE,
4007 .overflow = OVERFLOW_DISABLED,
4008 .ratio = RATIO_DISABLED
4011 ts_params->def_comp_xform->compress.deflate.huffman =
4012 RTE_COMP_HUFFMAN_DYNAMIC;
4014 /* fill the buffer with data based on rand. data */
4015 srand(IM_BUF_DATA_TEST_SIZE_OVER);
4016 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j)
4017 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
4019 /* Compress with compressdev, decompress with compressdev */
4020 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
4026 ts_params->def_comp_xform->compress.deflate.huffman =
4027 RTE_COMP_HUFFMAN_DEFAULT;
4028 rte_free(test_buffer);
4035 test_compressdev_deflate_im_buffers_SGL_over_2ops_first(void)
4037 struct comp_testsuite_params *ts_params = &testsuite_params;
4039 int ret = TEST_SUCCESS;
4041 const struct rte_compressdev_capabilities *capab;
4042 char *test_buffer = NULL;
4043 const char *test_buffers[2];
4045 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
4047 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
4048 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
4050 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
4053 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
4056 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0);
4057 if (test_buffer == NULL) {
4059 "Can't allocate buffer for big-data\n");
4063 test_buffers[0] = test_buffer;
4064 test_buffers[1] = compress_test_bufs[0];
4066 struct interim_data_params int_data = {
4067 (const char * const *)test_buffers,
4070 &ts_params->def_comp_xform,
4071 &ts_params->def_decomp_xform,
4075 struct test_data_params test_data = {
4076 .compress_state = RTE_COMP_OP_STATELESS,
4077 .decompress_state = RTE_COMP_OP_STATELESS,
4078 .buff_type = SGL_BOTH,
4079 .zlib_dir = ZLIB_NONE,
4082 .overflow = OVERFLOW_DISABLED,
4083 .ratio = RATIO_DISABLED
4086 ts_params->def_comp_xform->compress.deflate.huffman =
4087 RTE_COMP_HUFFMAN_DYNAMIC;
4089 /* fill the buffer with data based on rand. data */
4090 srand(IM_BUF_DATA_TEST_SIZE_OVER);
4091 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j)
4092 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
4094 /* Compress with compressdev, decompress with compressdev */
4095 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
4101 ts_params->def_comp_xform->compress.deflate.huffman =
4102 RTE_COMP_HUFFMAN_DEFAULT;
4103 rte_free(test_buffer);
4108 test_compressdev_deflate_im_buffers_SGL_over_2ops_second(void)
4110 struct comp_testsuite_params *ts_params = &testsuite_params;
4112 int ret = TEST_SUCCESS;
4114 const struct rte_compressdev_capabilities *capab;
4115 char *test_buffer = NULL;
4116 const char *test_buffers[2];
4118 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
4120 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
4121 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
4123 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
4126 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
4129 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0);
4130 if (test_buffer == NULL) {
4132 "Can't allocate buffer for big-data\n");
4136 test_buffers[0] = compress_test_bufs[0];
4137 test_buffers[1] = test_buffer;
4139 struct interim_data_params int_data = {
4140 (const char * const *)test_buffers,
4143 &ts_params->def_comp_xform,
4144 &ts_params->def_decomp_xform,
4148 struct test_data_params test_data = {
4149 .compress_state = RTE_COMP_OP_STATELESS,
4150 .decompress_state = RTE_COMP_OP_STATELESS,
4151 .buff_type = SGL_BOTH,
4152 .zlib_dir = ZLIB_NONE,
4155 .overflow = OVERFLOW_DISABLED,
4156 .ratio = RATIO_DISABLED
4159 ts_params->def_comp_xform->compress.deflate.huffman =
4160 RTE_COMP_HUFFMAN_DYNAMIC;
4162 /* fill the buffer with data based on rand. data */
4163 srand(IM_BUF_DATA_TEST_SIZE_OVER);
4164 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j)
4165 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
4167 /* Compress with compressdev, decompress with compressdev */
4168 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
4174 ts_params->def_comp_xform->compress.deflate.huffman =
4175 RTE_COMP_HUFFMAN_DEFAULT;
4176 rte_free(test_buffer);
/* Compressdev unit test suite.
 * Order matters: positive DEFLATE stateless/stateful cases first, then
 * positive IM ("intermediate") buffer handling cases, then negative
 * IM-buffer cases that deliberately oversubscribe the device queue.
 */
static struct unit_test_suite compressdev_testsuite  = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic_big),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_out_of_space_buffer),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateful_decomp),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateful_decomp_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_external_mbufs),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed_oos_recoverable),

		/* Positive test cases for IM buffer handling verification */
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_LB_1op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_LB_2ops_first),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_LB_2ops_second),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_LB_3ops),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_LB_4ops),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_SGL_1op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_SGL_2ops_first),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_SGL_2ops_second),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_SGL_3ops),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_SGL_4ops),

		/* Negative test cases for IM buffer handling verification */

		/* For this test huge mempool is necessary.
		 * It tests one case:
		 * only one op containing big amount of data, so that
		 * number of requested descriptors higher than number
		 * of available descriptors (128)
		 */
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_SGL_over_1op),

		/* For this test huge mempool is necessary.
		 * 2 ops. First op contains big amount of data:
		 * number of requested descriptors higher than number
		 * of available descriptors (128), the second op is
		 * relatively small. In this case both ops are rejected
		 */
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_SGL_over_2ops_first),

		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_im_buffers_SGL_over_2ops_second),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
/* Entry point invoked by the test framework: run the whole suite. */
static int
test_compressdev(void)
{
	return unit_test_suite_runner(&compressdev_testsuite);
}
/* Expose the suite as the 'compressdev_autotest' test command. */
REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);