1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
17 #include "test_compressdev_test_buffer.h"
20 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
28 * 30% extra size for compressed data compared to original data,
29 * in case data size cannot be reduced and it is actually bigger
30 * due to the compress block headers
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2
34 #define NUM_LARGE_MBUFS 16
35 #define SMALL_SEG_SIZE 256
38 #define NUM_MAX_XFORMS 16
39 #define NUM_MAX_INFLIGHT_OPS 128
42 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
43 #define ZLIB_HEADER_SIZE 2
44 #define ZLIB_TRAILER_SIZE 4
45 #define GZIP_HEADER_SIZE 10
46 #define GZIP_TRAILER_SIZE 8
48 #define OUT_OF_SPACE_BUF 1
50 #define MAX_MBUF_SEGMENT_SIZE 65535
51 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
52 #define NUM_BIG_MBUFS 4
53 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
/* Printable names for the rte_comp Huffman coding types, indexed by the
 * RTE_COMP_HUFFMAN_* enum values (used when logging test parameters). */
56 huffman_type_strings[] = {
57 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
58 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
59 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Buffer-layout combinations exercised by the tests: linear buffer (LB)
 * vs. chained scatter-gather list (SGL), on each side of the operation. */
70 LB_BOTH = 0, /* both input and output are linear*/
71 SGL_BOTH, /* both input and output are chained */
72 SGL_TO_LB, /* input buffer is chained */
73 LB_TO_SGL /* output buffer is chained */
/* Selects which direction a helper operates in: compression or
 * decompression (e.g. when sizing output buffers). */
82 OPERATION_COMPRESSION,
83 OPERATION_DECOMPRESSION
/*
 * Suite-wide resources, created once in testsuite_setup() and released in
 * testsuite_teardown(): three mbuf pools of different segment sizes, the
 * comp-op pool, and the default compress/decompress transforms.
 */
90 struct comp_testsuite_params {
91 struct rte_mempool *large_mbuf_pool;
92 struct rte_mempool *small_mbuf_pool;
93 struct rte_mempool *big_mbuf_pool;
94 struct rte_mempool *op_pool;
95 struct rte_comp_xform *def_comp_xform;
96 struct rte_comp_xform *def_decomp_xform;
/* Per-test-case inputs: the plaintext test buffers together with the
 * compress/decompress transforms that should be applied to them. */
99 struct interim_data_params {
100 const char * const *test_bufs;
101 unsigned int num_bufs;
103 struct rte_comp_xform **compress_xforms;
104 struct rte_comp_xform **decompress_xforms;
105 unsigned int num_xforms;
/* Knobs describing how a single test run should behave (buffer layout,
 * zlib reference direction, stateful/stateless, external mbufs, etc.). */
108 struct test_data_params {
109 enum rte_comp_op_type compress_state;
110 enum rte_comp_op_type decompress_state;
111 enum varied_buff buff_type;
112 enum zlib_direction zlib_dir;
/* non-zero: shrink output buffers to force OUT_OF_SPACE status */
113 unsigned int out_of_space;
/* non-zero: use the big mbuf pool / BIG_DATA_TEST_SIZE path */
114 unsigned int big_data;
115 /* stateful decompression specific parameters */
116 unsigned int decompress_output_block_size;
117 unsigned int decompress_steps_max;
118 /* external mbufs specific parameters */
119 unsigned int use_external_mbufs;
120 unsigned int inbuf_data_size;
121 const struct rte_memzone *inbuf_memzone;
122 const struct rte_memzone *compbuf_memzone;
123 const struct rte_memzone *uncompbuf_memzone;
124 /* overflow test activation */
125 enum overflow_test overflow;
/*
 * Aggregates all the working arrays one test run allocates: source and
 * destination mbufs, the ops submitted and the ops dequeued, plus
 * bookkeeping for checksums, produced sizes and stateful decompression.
 */
128 struct test_private_arrays {
129 struct rte_mbuf **uncomp_bufs;
130 struct rte_mbuf **comp_bufs;
131 struct rte_comp_op **ops;
132 struct rte_comp_op **ops_processed;
134 uint64_t *compress_checksum;
135 uint32_t *compressed_data_size;
/* accumulation buffer for stateful decompression output */
137 char **all_decomp_data;
138 unsigned int *decomp_produced_data_size;
139 uint16_t num_priv_xforms;
/* Single global instance shared by setup/teardown and all test cases. */
142 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite-level cleanup: log (but do not fail on) any pool that still has
 * outstanding buffers/ops — a leak by some test case — then free every
 * pool and the default xforms. rte_mempool_free()/rte_free() accept NULL,
 * so this is safe even if setup only partially succeeded.
 */
145 testsuite_teardown(void)
147 struct comp_testsuite_params *ts_params = &testsuite_params;
149 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
150 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
151 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
152 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
153 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
154 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
155 if (rte_mempool_in_use_count(ts_params->op_pool))
156 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
158 rte_mempool_free(ts_params->large_mbuf_pool);
159 rte_mempool_free(ts_params->small_mbuf_pool);
160 rte_mempool_free(ts_params->big_mbuf_pool);
161 rte_mempool_free(ts_params->op_pool);
162 rte_free(ts_params->def_comp_xform);
163 rte_free(ts_params->def_decomp_xform);
/*
 * Suite-level setup: requires at least one compressdev; sizes the large
 * mbuf pool from the biggest test buffer (scaled by COMPRESS_BUF_SIZE_RATIO
 * since "compressed" data can exceed the original), creates the small/big
 * pools for SGL tests and the op pool, and fills in the default DEFLATE
 * compress/decompress xforms. On any failure testsuite_teardown() is
 * invoked to release whatever was already created.
 */
167 testsuite_setup(void)
169 struct comp_testsuite_params *ts_params = &testsuite_params;
170 uint32_t max_buf_size = 0;
173 if (rte_compressdev_count() == 0) {
174 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
178 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
179 rte_compressdev_name_get(0));
/* +1 keeps room for the NUL terminator copied along with each buffer */
181 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
182 max_buf_size = RTE_MAX(max_buf_size,
183 strlen(compress_test_bufs[i]) + 1);
186 * Buffers to be used in compression and decompression.
187 * Since decompressed data might be larger than
188 * compressed data (due to block header),
189 * buffers should be big enough for both cases.
191 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
192 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
195 max_buf_size + RTE_PKTMBUF_HEADROOM,
197 if (ts_params->large_mbuf_pool == NULL) {
198 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
202 /* Create mempool with smaller buffers for SGL testing */
203 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
204 NUM_LARGE_MBUFS * MAX_SEGS,
206 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
208 if (ts_params->small_mbuf_pool == NULL) {
209 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
213 /* Create mempool with big buffers for SGL testing */
214 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
217 MAX_MBUF_SEGMENT_SIZE,
219 if (ts_params->big_mbuf_pool == NULL) {
220 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* ops carry a private area (priv_op_data) to remember their original index */
224 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
225 0, sizeof(struct priv_op_data),
227 if (ts_params->op_pool == NULL) {
228 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
232 ts_params->def_comp_xform =
233 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
234 if (ts_params->def_comp_xform == NULL) {
236 "Default compress xform could not be created\n");
239 ts_params->def_decomp_xform =
240 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
241 if (ts_params->def_decomp_xform == NULL) {
243 "Default decompress xform could not be created\n");
247 /* Initializes default values for compress/decompress xforms */
248 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing ',' below is the comma operator where ';' was
 * intended (same on the decompress .algo line). Harmless here, but should
 * be fixed to ';' upstream. */
249 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
250 ts_params->def_comp_xform->compress.deflate.huffman =
251 RTE_COMP_HUFFMAN_DEFAULT;
252 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
253 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
254 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
256 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
257 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
258 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
259 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* error path: release anything allocated so far */
264 testsuite_teardown();
/*
 * Per-test setup: configure device 0 with one queue pair and
 * NUM_MAX_XFORMS private xforms, set up queue pair 0 with room for
 * NUM_MAX_INFLIGHT_OPS descriptors, and start the device.
 */
270 generic_ut_setup(void)
272 /* Configure compressdev (one device, one queue pair) */
273 struct rte_compressdev_config config = {
274 .socket_id = rte_socket_id(),
276 .max_nb_priv_xforms = NUM_MAX_XFORMS,
280 if (rte_compressdev_configure(0, &config) < 0) {
281 RTE_LOG(ERR, USER1, "Device configuration failed\n");
285 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
286 rte_socket_id()) < 0) {
287 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
291 if (rte_compressdev_start(0) < 0) {
292 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0 (close failure is only logged). */
300 generic_ut_teardown(void)
302 rte_compressdev_stop(0);
303 if (rte_compressdev_close(0) < 0)
304 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: rte_compressdev_configure()/queue_pair_setup() must
 * reject invalid inputs — zero queue pairs, more queue pairs than the
 * device maximum (when the device advertises one), and queue pair setup
 * before any queue pairs are configured.
 */
308 test_compressdev_invalid_configuration(void)
310 struct rte_compressdev_config invalid_config;
311 struct rte_compressdev_config valid_config = {
312 .socket_id = rte_socket_id(),
314 .max_nb_priv_xforms = NUM_MAX_XFORMS,
317 struct rte_compressdev_info dev_info;
319 /* Invalid configuration with 0 queue pairs */
320 memcpy(&invalid_config, &valid_config,
321 sizeof(struct rte_compressdev_config));
322 invalid_config.nb_queue_pairs = 0;
324 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
325 "Device configuration was successful "
326 "with no queue pairs (invalid)\n");
329 * Invalid configuration with too many queue pairs
330 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit", so skip the check then */
332 rte_compressdev_info_get(0, &dev_info);
333 if (dev_info.max_nb_queue_pairs != 0) {
334 memcpy(&invalid_config, &valid_config,
335 sizeof(struct rte_compressdev_config));
336 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
338 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
339 "Device configuration was successful "
340 "with too many queue pairs (invalid)\n");
343 /* Invalid queue pair setup, with no number of queue pairs set */
344 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
345 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
346 "Queue pair setup was successful "
347 "with no queue pairs set (invalid)\n");
/*
 * Compare two byte buffers: both lengths and contents must match.
 * Logs the reason on mismatch; used to validate (de)compression output
 * against the expected data.
 */
353 compare_buffers(const char *buffer1, uint32_t buffer1_len,
354 const char *buffer2, uint32_t buffer2_len)
356 if (buffer1_len != buffer2_len) {
357 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
361 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
362 RTE_LOG(ERR, USER1, "Buffers are different\n");
370 * Maps compressdev and Zlib flush flags
/* Translate an rte_comp flush flag to the equivalent zlib flush constant;
 * the enum is exhaustive, so the fall-through case is unreachable. */
373 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
376 case RTE_COMP_FLUSH_NONE:
378 case RTE_COMP_FLUSH_SYNC:
380 case RTE_COMP_FLUSH_FULL:
382 case RTE_COMP_FLUSH_FINAL:
385 * There should be only the values above,
386 * so this should never happen
/*
 * Reference compression using zlib: deflates op->m_src into op->m_dst so
 * the result can be cross-checked against a compressdev PMD.
 *
 * Chained (SGL) input is first linearized into single_src_buf via
 * rte_pktmbuf_read(); chained output is deflated into single_dst_buf and
 * scattered back segment by segment afterwards. On success op->consumed,
 * op->produced, op->status and op->output_chksum are filled in. Temporary
 * buffers are always freed on exit.
 */
394 compress_zlib(struct rte_comp_op *op,
395 const struct rte_comp_xform *xform, int mem_level)
399 int strategy, window_bits, comp_level;
400 int ret = TEST_FAILED;
401 uint8_t *single_src_buf = NULL;
402 uint8_t *single_dst_buf = NULL;
404 /* initialize zlib stream */
405 stream.zalloc = Z_NULL;
406 stream.zfree = Z_NULL;
407 stream.opaque = Z_NULL;
409 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
412 strategy = Z_DEFAULT_STRATEGY;
415 * Window bits is the base two logarithm of the window size (in bytes).
416 * When doing raw DEFLATE, this number will be negative.
418 window_bits = -(xform->compress.window_size);
/* checksum selection switches the zlib output format: ADLER32 -> zlib
 * wrapper, CRC32 -> gzip wrapper (window bits encode this in zlib) */
419 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
421 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
422 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
424 comp_level = xform->compress.level;
426 if (comp_level != RTE_COMP_LEVEL_NONE)
427 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
428 window_bits, mem_level, strategy);
430 ret = deflateInit(&stream, Z_NO_COMPRESSION);
433 printf("Zlib deflate could not be initialized\n");
437 /* Assuming stateless operation */
/* SGL input: flatten the chained mbuf into one contiguous buffer */
439 if (op->m_src->nb_segs > 1) {
440 single_src_buf = rte_malloc(NULL,
441 rte_pktmbuf_pkt_len(op->m_src), 0);
442 if (single_src_buf == NULL) {
443 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
447 if (rte_pktmbuf_read(op->m_src, op->src.offset,
448 rte_pktmbuf_pkt_len(op->m_src) -
450 single_src_buf) == NULL) {
452 "Buffer could not be read entirely\n");
456 stream.avail_in = op->src.length;
457 stream.next_in = single_src_buf;
460 stream.avail_in = op->src.length;
461 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* SGL output: deflate into a bounce buffer, scatter back below */
465 if (op->m_dst->nb_segs > 1) {
467 single_dst_buf = rte_malloc(NULL,
468 rte_pktmbuf_pkt_len(op->m_dst), 0);
469 if (single_dst_buf == NULL) {
471 "Buffer could not be allocated\n");
475 stream.avail_out = op->m_dst->pkt_len;
476 stream.next_out = single_dst_buf;
478 } else {/* linear output */
479 stream.avail_out = op->m_dst->data_len;
480 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
484 /* Stateless operation, all buffer will be compressed in one go */
485 zlib_flush = map_zlib_flush_flag(op->flush_flag);
486 ret = deflate(&stream, zlib_flush);
488 if (stream.avail_in != 0) {
489 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
493 if (ret != Z_STREAM_END)
496 /* Copy data to destination SGL */
497 if (op->m_dst->nb_segs > 1) {
498 uint32_t remaining_data = stream.total_out;
499 uint8_t *src_data = single_dst_buf;
500 struct rte_mbuf *dst_buf = op->m_dst;
502 while (remaining_data > 0) {
503 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
504 uint8_t *, op->dst.offset);
/* last (partial) segment: copy the tail and stop */
506 if (remaining_data < dst_buf->data_len) {
507 memcpy(dst_data, src_data, remaining_data);
510 memcpy(dst_data, src_data, dst_buf->data_len);
511 remaining_data -= dst_buf->data_len;
512 src_data += dst_buf->data_len;
513 dst_buf = dst_buf->next;
518 op->consumed = stream.total_in;
/* Strip the zlib/gzip framing so only raw DEFLATE data remains in m_dst,
 * and report produced size net of header+trailer. */
519 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
520 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
521 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
522 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
524 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
525 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
526 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
527 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
530 op->produced = stream.total_out;
532 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* stream.adler holds adler32 or crc32 depending on the wrapper chosen */
533 op->output_chksum = stream.adler;
535 deflateReset(&stream);
540 rte_free(single_src_buf);
541 rte_free(single_dst_buf);
/*
 * Reference decompression using zlib: inflates op->m_src into op->m_dst
 * so PMD-compressed data can be verified independently.
 *
 * Chained (SGL) source is linearized first; in that case a contiguous
 * destination bounce buffer is also used and scattered back into m_dst
 * after inflation. Fills op->consumed/produced/status on success.
 */
547 decompress_zlib(struct rte_comp_op *op,
548 const struct rte_comp_xform *xform)
553 int ret = TEST_FAILED;
554 uint8_t *single_src_buf = NULL;
555 uint8_t *single_dst_buf = NULL;
557 /* initialize zlib stream */
558 stream.zalloc = Z_NULL;
559 stream.zfree = Z_NULL;
560 stream.opaque = Z_NULL;
563 * Window bits is the base two logarithm of the window size (in bytes).
564 * When doing raw DEFLATE, this number will be negative.
566 window_bits = -(xform->decompress.window_size);
567 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate init
 * path — looks copy-pasted from compress_zlib; should read "inflate". */
570 printf("Zlib deflate could not be initialized\n");
574 /* Assuming stateless operation */
/* SGL source: allocate bounce buffers for both input and output */
576 if (op->m_src->nb_segs > 1) {
577 single_src_buf = rte_malloc(NULL,
578 rte_pktmbuf_pkt_len(op->m_src), 0);
579 if (single_src_buf == NULL) {
580 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
583 single_dst_buf = rte_malloc(NULL,
584 rte_pktmbuf_pkt_len(op->m_dst), 0);
585 if (single_dst_buf == NULL) {
586 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
589 if (rte_pktmbuf_read(op->m_src, 0,
590 rte_pktmbuf_pkt_len(op->m_src),
591 single_src_buf) == NULL) {
593 "Buffer could not be read entirely\n");
597 stream.avail_in = op->src.length;
598 stream.next_in = single_src_buf;
599 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
600 stream.next_out = single_dst_buf;
603 stream.avail_in = op->src.length;
604 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
605 stream.avail_out = op->m_dst->data_len;
606 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
609 /* Stateless operation, all buffer will be compressed in one go */
610 zlib_flush = map_zlib_flush_flag(op->flush_flag);
611 ret = inflate(&stream, zlib_flush);
613 if (stream.avail_in != 0) {
614 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
618 if (ret != Z_STREAM_END)
/* scatter the bounce buffer back into m_dst; gated on m_src->nb_segs to
 * match the branch above that allocated single_dst_buf */
621 if (op->m_src->nb_segs > 1) {
622 uint32_t remaining_data = stream.total_out;
623 uint8_t *src_data = single_dst_buf;
624 struct rte_mbuf *dst_buf = op->m_dst;
626 while (remaining_data > 0) {
627 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
/* last (partial) segment: copy the tail and stop */
630 if (remaining_data < dst_buf->data_len) {
631 memcpy(dst_data, src_data, remaining_data);
634 memcpy(dst_data, src_data, dst_buf->data_len);
635 remaining_data -= dst_buf->data_len;
636 src_data += dst_buf->data_len;
637 dst_buf = dst_buf->next;
642 op->consumed = stream.total_in;
643 op->produced = stream.total_out;
644 op->status = RTE_COMP_OP_STATUS_SUCCESS;
646 inflateReset(&stream);
/*
 * Build a chained (scatter-gather) mbuf of total_data_size bytes, split
 * into seg_size segments hanging off head_buf. If test_buf is non-NULL its
 * contents are copied into the segments; NULL just reserves space (used
 * for destination buffers). limit_segs_in_sgl (0 = unlimited) caps the
 * chain length; the final segment absorbs any remaining data, drawing from
 * large_mbuf_pool when that tail exceeds seg_size.
 */
656 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
657 uint32_t total_data_size,
658 struct rte_mempool *small_mbuf_pool,
659 struct rte_mempool *large_mbuf_pool,
660 uint8_t limit_segs_in_sgl,
663 uint32_t remaining_data = total_data_size;
664 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
665 struct rte_mempool *pool;
666 struct rte_mbuf *next_seg;
669 const char *data_ptr = test_buf;
/* clamp segment count; -1 because head_buf itself is one segment */
673 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
674 num_remaining_segs = limit_segs_in_sgl - 1;
677 * Allocate data in the first segment (header) and
678 * copy data if test buffer is provided
680 if (remaining_data < seg_size)
681 data_size = remaining_data;
683 data_size = seg_size;
685 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
686 if (buf_ptr == NULL) {
688 "Not enough space in the 1st buffer\n");
692 if (data_ptr != NULL) {
693 /* Copy characters without NULL terminator */
694 strncpy(buf_ptr, data_ptr, data_size);
695 data_ptr += data_size;
697 remaining_data -= data_size;
698 num_remaining_segs--;
701 * Allocate the rest of the segments,
702 * copy the rest of the data and chain the segments.
704 for (i = 0; i < num_remaining_segs; i++) {
/* last segment takes all leftover data; if that exceeds seg_size
 * (possible when the chain was clamped) use the large pool */
706 if (i == (num_remaining_segs - 1)) {
708 if (remaining_data > seg_size)
709 pool = large_mbuf_pool;
711 pool = small_mbuf_pool;
712 data_size = remaining_data;
714 data_size = seg_size;
715 pool = small_mbuf_pool;
718 next_seg = rte_pktmbuf_alloc(pool);
719 if (next_seg == NULL) {
721 "New segment could not be allocated "
722 "from the mempool\n");
725 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
726 if (buf_ptr == NULL) {
728 "Not enough space in the buffer\n");
729 rte_pktmbuf_free(next_seg);
732 if (data_ptr != NULL) {
733 /* Copy characters without NULL terminator */
734 strncpy(buf_ptr, data_ptr, data_size);
735 data_ptr += data_size;
737 remaining_data -= data_size;
739 ret = rte_pktmbuf_chain(head_buf, next_seg);
741 rte_pktmbuf_free(next_seg);
/* NOTE(review): runtime string has a typo — "could not chained"
 * should be "could not be chained" (string left as-is here). */
743 "Segment could not chained\n");
/* No-op free callback for rte_pktmbuf_attach_extbuf(): the external
 * memory comes from memzones owned by the test, so nothing to release. */
752 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
/*
 * Enqueue num_bufs ops on device 0 / qp 0 in one burst, then dequeue into
 * ops_processed, polling until every enqueued op is back. Retries sleep
 * DEQUEUE_WAIT_TIME us and give up after MAX_DEQD_RETRIES so a stuck PMD
 * cannot hang the test.
 */
757 test_run_enqueue_dequeue(struct rte_comp_op **ops,
758 struct rte_comp_op **ops_processed,
759 unsigned int num_bufs)
761 uint16_t num_enqd, num_deqd, num_total_deqd;
762 unsigned int deqd_retries = 0;
764 /* Enqueue and dequeue all operations */
765 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
766 if (num_enqd < num_bufs) {
768 "Some operations could not be enqueued\n");
775 * If retrying a dequeue call, wait for 10 ms to allow
776 * enough time to the driver to process the operations
778 if (deqd_retries != 0) {
780 * Avoid infinite loop if not all the
781 * operations get out of the device
783 if (deqd_retries == MAX_DEQD_RETRIES) {
785 "Not all operations could be dequeued\n");
788 usleep(DEQUEUE_WAIT_TIME);
790 num_deqd = rte_compressdev_dequeue_burst(0, 0,
791 &ops_processed[num_total_deqd], num_bufs);
792 num_total_deqd += num_deqd;
795 } while (num_total_deqd < num_enqd);
801 * Arrays initialization. Input buffers preparation for compression.
803 * API that initializes all the private arrays to NULL
804 * and allocates input buffers to perform compression operations.
807 * Interim data containing session/transformation objects.
809 * The test parameters set by users (command line parameters).
810 * @param test_priv_data
811 * A container used for aggregating all the private test arrays.
/*
 * Zero all private test arrays, pick the mbuf pool matching the test's
 * buffer type, bulk-allocate the source (uncompressed) mbufs, and fill
 * them: either attached to an external memzone, built as SGL chains via
 * prepare_sgl_bufs(), or filled linearly with the test strings
 * (NUL terminator included).
 */
817 test_setup_com_bufs(const struct interim_data_params *int_data,
818 const struct test_data_params *test_data,
819 const struct test_private_arrays *test_priv_data)
821 /* local variables: */
826 char **all_decomp_data = test_priv_data->all_decomp_data;
828 struct comp_testsuite_params *ts_params = &testsuite_params;
831 const char * const *test_bufs = int_data->test_bufs;
832 unsigned int num_bufs = int_data->num_bufs;
834 /* from test_data: */
835 unsigned int buff_type = test_data->buff_type;
836 unsigned int big_data = test_data->big_data;
838 /* from test_priv_data: */
839 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
840 struct rte_mempool *buf_pool;
/* static: shared-info must outlive this call while ext mbufs are in use */
842 static struct rte_mbuf_ext_shared_info inbuf_info;
844 size_t array_size = sizeof(void *) * num_bufs;
846 /* Initialize all arrays to NULL */
847 memset(test_priv_data->uncomp_bufs, 0, array_size);
848 memset(test_priv_data->comp_bufs, 0, array_size);
849 memset(test_priv_data->ops, 0, array_size);
850 memset(test_priv_data->ops_processed, 0, array_size);
851 memset(test_priv_data->priv_xforms, 0, array_size);
852 memset(test_priv_data->compressed_data_size,
853 0, sizeof(uint32_t) * num_bufs);
/* stateful decompression accumulates output across steps in this buffer */
855 if (test_data->decompress_state == RTE_COMP_OP_STATEFUL) {
856 data_size = strlen(test_bufs[0]) + 1;
857 *all_decomp_data = rte_malloc(NULL, data_size,
858 RTE_CACHE_LINE_SIZE);
862 buf_pool = ts_params->big_mbuf_pool;
863 else if (buff_type == SGL_BOTH)
864 buf_pool = ts_params->small_mbuf_pool;
866 buf_pool = ts_params->large_mbuf_pool;
868 /* for compression uncomp_bufs is used as a source buffer */
869 /* allocation from buf_pool (mempool type) */
870 ret = rte_pktmbuf_alloc_bulk(buf_pool,
871 uncomp_bufs, num_bufs);
874 "Source mbufs could not be allocated "
875 "from the mempool\n");
/* external mbufs: every source mbuf points at the same input memzone */
879 if (test_data->use_external_mbufs) {
880 inbuf_info.free_cb = extbuf_free_callback;
881 inbuf_info.fcb_opaque = NULL;
882 rte_mbuf_ext_refcnt_set(&inbuf_info, 1);
883 for (i = 0; i < num_bufs; i++) {
884 rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
885 test_data->inbuf_memzone->addr,
886 test_data->inbuf_memzone->iova,
887 test_data->inbuf_data_size,
889 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i],
890 test_data->inbuf_data_size);
891 if (buf_ptr == NULL) {
893 "Append extra bytes to the source mbuf failed\n");
897 } else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
898 for (i = 0; i < num_bufs; i++) {
899 data_size = strlen(test_bufs[i]) + 1;
900 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
902 big_data ? buf_pool : ts_params->small_mbuf_pool,
903 big_data ? buf_pool : ts_params->large_mbuf_pool,
904 big_data ? 0 : MAX_SEGS,
905 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
/* linear source: append and copy each test string, including NUL */
909 for (i = 0; i < num_bufs; i++) {
910 data_size = strlen(test_bufs[i]) + 1;
912 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
913 if (buf_ptr == NULL) {
915 "Append extra bytes to the source mbuf failed\n");
918 strlcpy(buf_ptr, test_bufs[i], data_size);
926 * Data size calculation (for both compression and decompression).
928 * Calculate size of anticipated output buffer required for both
929 * compression and decompression operations based on input int_data.
932 * Operation type: compress or decompress
933 * @param out_of_space_and_zlib
934 * Boolean value to switch into "out of space" buffer if set.
935 * To test "out-of-space" data size, zlib_decompress must be set as well.
936 * @param test_priv_data
937 * A container used for aggregating all the private test arrays.
939 * Interim data containing session/transformation objects.
941 * The test parameters set by users (command line parameters).
943 * current buffer index
/*
 * Compute the output-buffer size for test buffer i. Out-of-space tests
 * get a deliberately tiny OUT_OF_SPACE_BUF. For compression, the size is
 * the input length scaled by COMPRESS_BUF_SIZE_RATIO (or the deliberately
 * insufficient RATIO_OVERFLOW when the overflow test runs on a non-zlib
 * compressor). For decompression, the size is the original plaintext
 * length (+ NUL), looked up via the op's orig_idx private data.
 */
947 static inline uint32_t
948 test_mbufs_calculate_data_size(
949 enum operation_type op_type,
950 unsigned int out_of_space_and_zlib,
951 const struct test_private_arrays *test_priv_data,
952 const struct interim_data_params *int_data,
953 const struct test_data_params *test_data,
956 /* local variables: */
958 struct priv_op_data *priv_data;
960 uint8_t not_zlib_compr; /* true if zlib isn't current compression dev */
961 enum overflow_test overflow = test_data->overflow;
963 /* from test_priv_data: */
964 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
967 const char * const *test_bufs = int_data->test_bufs;
969 if (out_of_space_and_zlib)
970 data_size = OUT_OF_SPACE_BUF;
972 if (op_type == OPERATION_COMPRESSION) {
973 not_zlib_compr = (test_data->zlib_dir == ZLIB_DECOMPRESS
974 || test_data->zlib_dir == ZLIB_NONE);
/* overflow test intentionally under-sizes the buffer (0.2x) */
976 ratio = (not_zlib_compr &&
977 (overflow == OVERFLOW_ENABLED)) ?
978 COMPRESS_BUF_SIZE_RATIO_OVERFLOW :
979 COMPRESS_BUF_SIZE_RATIO;
981 data_size = strlen(test_bufs[i]) * ratio;
/* decompression: recover the original buffer index stored in the
 * op's private area, since dequeue order is not guaranteed */
984 priv_data = (struct priv_op_data *)
985 (ops_processed[i] + 1);
986 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
995 * Memory buffers preparation (for both compression and decompression).
997 * Function allocates output buffers to perform compression
998 * or decompression operations depending on value of op_type.
1001 * Operation type: compress or decompress
1002 * @param out_of_space_and_zlib
1003 * Boolean value to switch into "out of space" buffer if set.
1004 * To test "out-of-space" data size, zlib_decompress must be set as well.
1005 * @param test_priv_data
1006 * A container used for aggregating all the private test arrays.
1008 * Interim data containing session/transformation objects.
1010 * The test parameters set by users (command line parameters).
1011 * @param current_extbuf_info,
1012 * The structure containing all the information related to external mbufs
/*
 * Allocate and size the output mbufs for a compression run (comp_bufs)
 * or a decompression run (uncomp_bufs, reused as destination). Three
 * fill strategies mirror test_setup_com_bufs(): attach to an external
 * memzone, build an SGL chain of the computed size, or append the
 * computed size to a linear mbuf.
 */
1018 test_setup_output_bufs(
1019 enum operation_type op_type,
1020 unsigned int out_of_space_and_zlib,
1021 const struct test_private_arrays *test_priv_data,
1022 const struct interim_data_params *int_data,
1023 const struct test_data_params *test_data,
1024 struct rte_mbuf_ext_shared_info *current_extbuf_info)
1026 /* local variables: */
1032 /* from test_priv_data: */
1033 struct rte_mbuf **current_bufs;
1035 /* from int_data: */
1036 unsigned int num_bufs = int_data->num_bufs;
1038 /* from test_data: */
1039 unsigned int buff_type = test_data->buff_type;
1040 unsigned int big_data = test_data->big_data;
1041 const struct rte_memzone *current_memzone;
1043 struct comp_testsuite_params *ts_params = &testsuite_params;
1044 struct rte_mempool *buf_pool;
1047 buf_pool = ts_params->big_mbuf_pool;
1048 else if (buff_type == SGL_BOTH)
1049 buf_pool = ts_params->small_mbuf_pool;
1051 buf_pool = ts_params->large_mbuf_pool;
/* destination array depends on direction: comp output vs decomp output */
1053 if (op_type == OPERATION_COMPRESSION)
1054 current_bufs = test_priv_data->comp_bufs;
1056 current_bufs = test_priv_data->uncomp_bufs;
1058 /* the mbufs allocation*/
1059 ret = rte_pktmbuf_alloc_bulk(buf_pool, current_bufs, num_bufs);
1062 "Destination mbufs could not be allocated "
1063 "from the mempool\n");
1067 if (test_data->use_external_mbufs) {
1068 current_extbuf_info->free_cb = extbuf_free_callback;
1069 current_extbuf_info->fcb_opaque = NULL;
1070 rte_mbuf_ext_refcnt_set(current_extbuf_info, 1);
1071 if (op_type == OPERATION_COMPRESSION)
1072 current_memzone = test_data->compbuf_memzone;
1074 current_memzone = test_data->uncompbuf_memzone;
1076 for (i = 0; i < num_bufs; i++) {
1077 rte_pktmbuf_attach_extbuf(current_bufs[i],
1078 current_memzone->addr,
1079 current_memzone->iova,
1080 current_memzone->len,
1081 current_extbuf_info);
1082 rte_pktmbuf_append(current_bufs[i],
1083 current_memzone->len);
1086 for (i = 0; i < num_bufs; i++) {
1088 /* data size calculation */
1089 data_size = test_mbufs_calculate_data_size(
1091 out_of_space_and_zlib,
1097 /* data allocation */
/* NULL test_buf: prepare_sgl_bufs() only reserves space here */
1098 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1099 ret = prepare_sgl_bufs(NULL, current_bufs[i],
1101 big_data ? buf_pool :
1102 ts_params->small_mbuf_pool,
1103 big_data ? buf_pool :
1104 ts_params->large_mbuf_pool,
1105 big_data ? 0 : MAX_SEGS,
1106 big_data ? MAX_DATA_MBUF_SIZE :
1111 buf_ptr = rte_pktmbuf_append(current_bufs[i],
1113 if (buf_ptr == NULL) {
1115 "Append extra bytes to the destination mbuf failed\n");
1126 * The main compression function.
1128 * Function performs compression operation.
1129 * Operation(s) configuration, depending on CLI parameters.
1130 * Operation(s) processing.
1133 * Interim data containing session/transformation objects.
1135 * The test parameters set by users (command line parameters).
1136 * @param test_priv_data
1137 * A container used for aggregating all the private test arrays.
/*
 * Core compression step: build one op per source buffer, compress either
 * through zlib (reference path) or through the compressdev PMD — creating
 * shareable or per-op private xforms depending on device capabilities —
 * run enqueue/dequeue, and handle OUT_OF_SPACE_RECOVERABLE by advancing
 * src/dst offsets for a retry. Ops and private xforms are released on the
 * cleanup path.
 */
1143 test_deflate_comp_run(const struct interim_data_params *int_data,
1144 const struct test_data_params *test_data,
1145 const struct test_private_arrays *test_priv_data)
1147 /* local variables: */
1148 struct priv_op_data *priv_data;
1150 uint16_t num_priv_xforms = 0;
1155 struct comp_testsuite_params *ts_params = &testsuite_params;
1157 /* from test_data: */
1158 enum rte_comp_op_type operation_type = test_data->compress_state;
1159 unsigned int zlib_compress =
1160 (test_data->zlib_dir == ZLIB_ALL ||
1161 test_data->zlib_dir == ZLIB_COMPRESS);
1163 /* from int_data: */
1164 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1165 unsigned int num_xforms = int_data->num_xforms;
1166 unsigned int num_bufs = int_data->num_bufs;
1168 /* from test_priv_data: */
1169 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
1170 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1171 struct rte_comp_op **ops = test_priv_data->ops;
1172 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1173 void **priv_xforms = test_priv_data->priv_xforms;
1175 const struct rte_compressdev_capabilities *capa =
1176 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1178 /* Build the compression operations */
1179 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1182 "Compress operations could not be allocated "
1183 "from the mempool\n");
1188 for (i = 0; i < num_bufs; i++) {
1189 ops[i]->m_src = uncomp_bufs[i];
1190 ops[i]->m_dst = comp_bufs[i];
1191 ops[i]->src.offset = 0;
1192 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
1193 ops[i]->dst.offset = 0;
1195 if (operation_type == RTE_COMP_OP_STATELESS) {
1196 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1199 "Compression: stateful operations are not "
1200 "supported in these tests yet\n");
1204 ops[i]->input_chksum = 0;
1206 * Store original operation index in private data,
1207 * since ordering does not have to be maintained,
1208 * when dequeueing from compressdev, so a comparison
1209 * at the end of the test can be done.
1211 priv_data = (struct priv_op_data *) (ops[i] + 1);
1212 priv_data->orig_idx = i;
1215 /* Compress data (either with Zlib API or compressdev API */
1216 if (zlib_compress) {
/* zlib reference path: compress in-place, xforms cycled round-robin */
1217 for (i = 0; i < num_bufs; i++) {
1218 const struct rte_comp_xform *compress_xform =
1219 compress_xforms[i % num_xforms];
1220 ret = compress_zlib(ops[i], compress_xform,
1227 ops_processed[i] = ops[i];
1230 /* Create compress private xform data */
1231 for (i = 0; i < num_xforms; i++) {
1232 ret = rte_compressdev_private_xform_create(0,
1233 (const struct rte_comp_xform *)
1238 "Compression private xform "
1239 "could not be created\n");
/* shareable xforms can be reused across ops; otherwise each op
 * needs its own private xform instance */
1245 if (capa->comp_feature_flags &
1246 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1247 /* Attach shareable private xform data to ops */
1248 for (i = 0; i < num_bufs; i++)
1249 ops[i]->private_xform =
1250 priv_xforms[i % num_xforms];
1252 /* Create rest of the private xforms for the other ops */
1253 for (i = num_xforms; i < num_bufs; i++) {
1254 ret = rte_compressdev_private_xform_create(0,
1255 compress_xforms[i % num_xforms],
1259 "Compression private xform "
1260 "could not be created\n");
1266 /* Attach non shareable private xform data to ops */
1267 for (i = 0; i < num_bufs; i++)
1268 ops[i]->private_xform = priv_xforms[i];
1272 ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs);
1275 "Compression: enqueue/dequeue operation failed\n");
1280 for (i = 0; i < num_bufs; i++) {
1281 test_priv_data->compressed_data_size[i] +=
1282 ops_processed[i]->produced;
/* recoverable out-of-space: shift offsets past the data already
 * consumed/produced and grow the dst mbuf for the retry */
1284 if (ops_processed[i]->status ==
1285 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
1288 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1289 ops[i]->src.offset +=
1290 ops_processed[i]->consumed;
1291 ops[i]->src.length -=
1292 ops_processed[i]->consumed;
1293 ops[i]->dst.offset +=
1294 ops_processed[i]->produced;
1296 buf_ptr = rte_pktmbuf_append(
1298 ops_processed[i]->produced);
1300 if (buf_ptr == NULL) {
1302 "Data recovery: append extra bytes to the current mbuf failed\n");
1312 /* Free resources */
1314 for (i = 0; i < num_bufs; i++) {
1315 rte_comp_op_free(ops[i]);
1316 ops_processed[i] = NULL;
1319 /* Free compress private xforms */
1320 for (i = 0; i < num_priv_xforms; i++) {
1321 if (priv_xforms[i] != NULL) {
1322 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1323 priv_xforms[i] = NULL;
1331 * Prints out the test report. Memory freeing.
1333 * Called after successful compression.
1334 * Operation(s) status validation and decompression buffers freeing.
1336 * -1 returned if the function fails.
1339 * Interim data containing session/transformation objects.
1341 * The test parameters set by users (command line parameters).
1342 * @param test_priv_data
1343 * A container used for aggregating all the private test arrays.
1345 * - 2: Some operation is not supported
1346 * - 1: Decompression should be skipped
/*
 * Post-compression stage: logs per-buffer compression statistics, records
 * per-operation checksums, validates operation status, and frees the source
 * mbufs (destination mbufs and op info are kept for the decompression stage).
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
1351 test_deflate_comp_finalize(const struct interim_data_params *int_data,
1352 const struct test_data_params *test_data,
1353 const struct test_private_arrays *test_priv_data)
1355 /* local variables: */
1357 struct priv_op_data *priv_data;
1359 /* from int_data: */
1360 unsigned int num_xforms = int_data->num_xforms;
1361 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1362 uint16_t *buf_idx = int_data->buf_idx;
1363 unsigned int num_bufs = int_data->num_bufs;
1365 /* from test_priv_data: */
1366 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1367 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1368 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1369 struct rte_comp_op **ops = test_priv_data->ops;
1371 /* from test_data: */
1372 unsigned int out_of_space = test_data->out_of_space;
1373 unsigned int zlib_compress =
1374 (test_data->zlib_dir == ZLIB_ALL ||
1375 test_data->zlib_dir == ZLIB_COMPRESS);
1376 unsigned int zlib_decompress =
1377 (test_data->zlib_dir == ZLIB_ALL ||
1378 test_data->zlib_dir == ZLIB_DECOMPRESS);
/* Report, per buffer, which engine compressed it and the size change */
1380 for (i = 0; i < num_bufs; i++) {
1381 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1382 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1383 const struct rte_comp_compress_xform *compress_xform =
1384 &compress_xforms[xform_idx]->compress;
1385 enum rte_comp_huffman huffman_type =
1386 compress_xform->deflate.huffman;
/* If zlib does the decompress side, compression was done by the PMD */
1387 char engine[] = "zlib (directly, not PMD)";
1388 if (zlib_decompress)
1389 strlcpy(engine, "PMD", sizeof(engine));
1391 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
1392 " %u bytes (level = %d, huffman = %s)\n",
1393 buf_idx[priv_data->orig_idx], engine,
1394 ops_processed[i]->consumed, ops_processed[i]->produced,
1395 compress_xform->level,
1396 huffman_type_strings[huffman_type]);
1397 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
1398 ops_processed[i]->consumed == 0 ? 0 :
1399 (float)ops_processed[i]->produced /
1400 ops_processed[i]->consumed * 100);
/* Save the output checksum so the decompress stage can cross-check it */
1401 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
1402 compress_checksum[i] = ops_processed[i]->output_chksum;
1407 * Check operation status and free source mbufs (destination mbuf and
1408 * compress operation information is needed for the decompression stage)
1410 for (i = 0; i < num_bufs; i++) {
/* Negative out-of-space test: OUT_OF_SPACE_TERMINATED is the pass case */
1411 if (out_of_space && !zlib_compress) {
1412 if (ops_processed[i]->status !=
1413 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1415 "Operation without expected out of "
1416 "space status error\n");
1422 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1423 if (test_data->overflow == OVERFLOW_ENABLED) {
1424 if (ops_processed[i]->status ==
1425 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1426 RTE_LOG(INFO, USER1,
1427 "Out-of-space-recoverable functionality"
1428 " is not supported on this device\n");
1434 "Some operations were not successful\n");
/* Source mbufs are no longer needed once compression succeeded */
1437 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1438 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1439 uncomp_bufs[priv_data->orig_idx] = NULL;
1442 if (out_of_space && !zlib_compress)
1449 * The main decompression function.
1451 * Function performs decompression operation.
1452 * Operation(s) configuration, depending on CLI parameters.
1453 * Operation(s) processing.
1456 * Interim data containing session/transformation objects.
1458 * The test parameters set by users (command line parameters).
1459 * @param test_priv_data
1460 * A container used for aggregating all the private test arrays.
/*
 * Decompression setup/run stage: wires the compressed output of the previous
 * stage as decompression input, configures each op (stateless or stateful),
 * attaches private xforms or a stream, and runs zlib directly when requested.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
1466 test_deflate_decomp_run(const struct interim_data_params *int_data,
1467 const struct test_data_params *test_data,
1468 struct test_private_arrays *test_priv_data)
1471 /* local variables: */
1472 struct priv_op_data *priv_data;
1474 uint16_t num_priv_xforms = 0;
1478 struct comp_testsuite_params *ts_params = &testsuite_params;
1480 /* from test_data: */
1481 enum rte_comp_op_type operation_type = test_data->decompress_state;
1482 unsigned int zlib_decompress =
1483 (test_data->zlib_dir == ZLIB_ALL ||
1484 test_data->zlib_dir == ZLIB_DECOMPRESS);
1486 /* from int_data: */
1487 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
1488 unsigned int num_xforms = int_data->num_xforms;
1489 unsigned int num_bufs = int_data->num_bufs;
1491 /* from test_priv_data: */
1492 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
1493 struct rte_comp_op **ops = test_priv_data->ops;
1494 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1495 void **priv_xforms = test_priv_data->priv_xforms;
1496 uint32_t *compressed_data_size = test_priv_data->compressed_data_size;
1497 void **stream = test_priv_data->stream;
1499 const struct rte_compressdev_capabilities *capa =
1500 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1502 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1505 "Decompress operations could not be allocated "
1506 "from the mempool\n");
1511 /* Source buffer is the compressed data from the previous operations */
1512 for (i = 0; i < num_bufs; i++) {
1513 ops[i]->m_src = ops_processed[i]->m_dst;
1514 ops[i]->m_dst = uncomp_bufs[i];
1515 ops[i]->src.offset = 0;
1517 * Set the length of the compressed data to the
1518 * number of bytes that were produced in the previous stage
/* compressed_data_size[i] is the accumulated total across recoverable
 * out-of-space retries; fall back to the single op's produced count */
1521 if (compressed_data_size[i])
1522 ops[i]->src.length = compressed_data_size[i];
1524 ops[i]->src.length = ops_processed[i]->produced;
1526 ops[i]->dst.offset = 0;
1528 if (operation_type == RTE_COMP_OP_STATELESS) {
1529 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1530 ops[i]->op_type = RTE_COMP_OP_STATELESS;
1531 } else if (!zlib_decompress) {
/* Stateful PMD decompression: flush per step rather than final */
1532 ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
1533 ops[i]->op_type = RTE_COMP_OP_STATEFUL;
1536 "Decompression: stateful operations are"
1537 " not supported in these tests yet\n");
1541 ops[i]->input_chksum = 0;
1543 * Copy private data from previous operations,
1544 * to keep the pointer to the original buffer
1546 memcpy(ops[i] + 1, ops_processed[i] + 1,
1547 sizeof(struct priv_op_data));
1551 * Free the previous compress operations,
1552 * as they are not needed anymore
1554 rte_comp_op_bulk_free(ops_processed, num_bufs);
1556 /* Decompress data (either with Zlib API or compressdev API) */
1557 if (zlib_decompress) {
1558 for (i = 0; i < num_bufs; i++) {
1559 priv_data = (struct priv_op_data *)(ops[i] + 1);
1560 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1561 const struct rte_comp_xform *decompress_xform =
1562 decompress_xforms[xform_idx];
1564 ret = decompress_zlib(ops[i], decompress_xform);
1570 ops_processed[i] = ops[i];
1573 if (operation_type == RTE_COMP_OP_STATELESS) {
1574 /* Create decompress private xform data */
1575 for (i = 0; i < num_xforms; i++) {
1576 ret = rte_compressdev_private_xform_create(0,
1577 (const struct rte_comp_xform *)
1578 decompress_xforms[i],
1582 "Decompression private xform "
1583 "could not be created\n");
/* Shareable xforms can be reused across ops; otherwise one per op */
1590 if (capa->comp_feature_flags &
1591 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1592 /* Attach shareable private xform data to ops */
1593 for (i = 0; i < num_bufs; i++) {
1594 priv_data = (struct priv_op_data *)
1596 uint16_t xform_idx =
1597 priv_data->orig_idx % num_xforms;
1598 ops[i]->private_xform =
1599 priv_xforms[xform_idx];
1602 /* Create rest of the private xforms */
1603 /* for the other ops */
1604 for (i = num_xforms; i < num_bufs; i++) {
1606 rte_compressdev_private_xform_create(0,
1607 decompress_xforms[i % num_xforms],
1611 "Decompression private xform"
1612 " could not be created\n");
1619 /* Attach non shareable private xform data */
1621 for (i = 0; i < num_bufs; i++) {
1622 priv_data = (struct priv_op_data *)
1624 uint16_t xform_idx =
1625 priv_data->orig_idx;
1626 ops[i]->private_xform =
1627 priv_xforms[xform_idx];
1631 /* Create a stream object for stateful decompression */
1632 ret = rte_compressdev_stream_create(0,
1633 decompress_xforms[0], stream);
1636 "Decompression stream could not be created, error %d\n",
1641 /* Attach stream to ops */
1642 for (i = 0; i < num_bufs; i++)
1643 ops[i]->stream = *stream;
/* Record xform count so the caller can free them on teardown */
1646 test_priv_data->num_priv_xforms = num_priv_xforms;
1654 * Prints out the test report. Memory freeing.
1656 * Called after successful decompression.
1657 * Operation(s) status validation and compression buffers freeing.
1659 * -1 returned if the function fails.
1662 * Interim data containing session/transformation objects.
1664 * The test parameters set by users (command line parameters).
1665 * @param test_priv_data
1666 * A container used for aggregating all the private test arrays.
1668 * - 2: Next step must be executed by the caller (stateful decompression only)
1669 * - 1: On success (caller should stop and exit)
/*
 * Post-decompression stage: logs per-buffer results, validates op status,
 * accumulates stateful output into all_decomp_data (resubmitting ops while
 * input remains), verifies data and checksums, and frees compressed mbufs.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
1674 test_deflate_decomp_finalize(const struct interim_data_params *int_data,
1675 const struct test_data_params *test_data,
1676 const struct test_private_arrays *test_priv_data)
1678 /* local variables: */
1680 struct priv_op_data *priv_data;
/* static: step counter persists across the repeated stateful iterations */
1681 static unsigned int step;
1683 /* from int_data: */
1684 uint16_t *buf_idx = int_data->buf_idx;
1685 unsigned int num_bufs = int_data->num_bufs;
1686 const char * const *test_bufs = int_data->test_bufs;
1687 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1689 /* from test_priv_data: */
1690 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1691 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
1692 struct rte_comp_op **ops = test_priv_data->ops;
1693 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1694 unsigned int *decomp_produced_data_size =
1695 test_priv_data->decomp_produced_data_size;
1696 char **all_decomp_data = test_priv_data->all_decomp_data;
1698 /* from test_data: */
1699 unsigned int out_of_space = test_data->out_of_space;
1700 enum rte_comp_op_type operation_type = test_data->decompress_state;
1702 unsigned int zlib_compress =
1703 (test_data->zlib_dir == ZLIB_ALL ||
1704 test_data->zlib_dir == ZLIB_COMPRESS);
1705 unsigned int zlib_decompress =
1706 (test_data->zlib_dir == ZLIB_ALL ||
1707 test_data->zlib_dir == ZLIB_DECOMPRESS);
1709 for (i = 0; i < num_bufs; i++) {
1710 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1711 char engine[] = "zlib, (directly, no PMD)";
1713 strlcpy(engine, "pmd", sizeof(engine));
1714 RTE_LOG(DEBUG, USER1,
1715 "Buffer %u decompressed by %s from %u to %u bytes\n",
1716 buf_idx[priv_data->orig_idx], engine,
1717 ops_processed[i]->consumed, ops_processed[i]->produced);
1722 * Check operation status and free source mbuf (destination mbuf and
1723 * compress operation information is still needed)
1725 for (i = 0; i < num_bufs; i++) {
/* Negative out-of-space test: OUT_OF_SPACE_TERMINATED is the pass case */
1726 if (out_of_space && !zlib_decompress) {
1727 if (ops_processed[i]->status !=
1728 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1731 "Operation without expected out of "
1732 "space status error\n");
1738 if (operation_type == RTE_COMP_OP_STATEFUL
1739 && (ops_processed[i]->status ==
1740 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
1741 || ops_processed[i]->status ==
1742 RTE_COMP_OP_STATUS_SUCCESS)) {
1743 /* collect the output into all_decomp_data */
1744 const void *ptr = rte_pktmbuf_read(
1745 ops_processed[i]->m_dst,
1746 ops_processed[i]->dst.offset,
1747 ops_processed[i]->produced,
1749 *decomp_produced_data_size);
/* rte_pktmbuf_read only copies when data is non-contiguous;
 * if it returned a direct pointer, copy explicitly */
1750 if (ptr != *all_decomp_data +
1751 *decomp_produced_data_size)
1752 rte_memcpy(*all_decomp_data +
1753 *decomp_produced_data_size,
1754 ptr, ops_processed[i]->produced);
1756 *decomp_produced_data_size +=
1757 ops_processed[i]->produced;
/* Unconsumed input left: the stateful op must be resubmitted */
1758 if (ops_processed[i]->src.length >
1759 ops_processed[i]->consumed) {
1760 if (ops_processed[i]->status ==
1761 RTE_COMP_OP_STATUS_SUCCESS) {
1763 "Operation finished too early\n");
1767 if (step >= test_data->decompress_steps_max) {
1769 "Operation exceeded maximum steps\n");
/* Rewind the op past the consumed bytes and retry */
1772 ops[i] = ops_processed[i];
1774 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1775 ops[i]->src.offset +=
1776 ops_processed[i]->consumed;
1777 ops[i]->src.length -=
1778 ops_processed[i]->consumed;
1779 /* repeat the operation */
1783 /* Compare the original stream with the */
1784 /* decompressed stream (in size and the data) */
1785 priv_data = (struct priv_op_data *)
1786 (ops_processed[i] + 1);
1788 test_bufs[priv_data->orig_idx];
1789 const char *buf2 = *all_decomp_data;
1791 if (compare_buffers(buf1, strlen(buf1) + 1,
1792 buf2, *decomp_produced_data_size) < 0)
1794 /* Test checksums */
1795 if (compress_xforms[0]->compress.chksum
1796 != RTE_COMP_CHECKSUM_NONE) {
1797 if (ops_processed[i]->output_chksum
1798 != compress_checksum[i]) {
1800 "The checksums differ\n"
1801 "Compression Checksum: %" PRIu64 "\tDecompression "
1802 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1803 ops_processed[i]->output_chksum);
1808 } else if (ops_processed[i]->status !=
1809 RTE_COMP_OP_STATUS_SUCCESS) {
1811 "Some operations were not successful\n");
/* Compressed source mbufs are no longer needed */
1814 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1815 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1816 comp_bufs[priv_data->orig_idx] = NULL;
1819 if (out_of_space && !zlib_decompress)
1826 * Validation of the output (compression/decompression) data.
1828 * The function compares the source stream with the output stream,
1829 * after decompression, to check if compression/decompression
1831 * -1 returned if the function fails.
1834 * Interim data containing session/transformation objects.
1836 * The test parameters set by users (command line parameters).
1837 * @param test_priv_data
1838 * A container used for aggregating all the private test arrays.
/*
 * Final validation: compares each original input stream against the
 * decompressed output (data and size), and cross-checks compression vs.
 * decompression checksums when checksums are enabled.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
1844 test_results_validation(const struct interim_data_params *int_data,
1845 const struct test_data_params *test_data,
1846 const struct test_private_arrays *test_priv_data)
1848 /* local variables: */
1850 struct priv_op_data *priv_data;
1853 char *contig_buf = NULL;
1856 /* from int_data: */
1857 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1858 unsigned int num_bufs = int_data->num_bufs;
1859 const char * const *test_bufs = int_data->test_bufs;
1861 /* from test_priv_data: */
1862 uint64_t *compress_checksum = test_priv_data->compress_checksum;
1863 struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1866 * Compare the original stream with the decompressed stream
1867 * (in size and the data)
1869 for (i = 0; i < num_bufs; i++) {
1870 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
/* External-mbuf tests read the reference input from a memzone instead
 * of the static test buffer array */
1871 buf1 = test_data->use_external_mbufs ?
1872 test_data->inbuf_memzone->addr :
1873 test_bufs[priv_data->orig_idx];
1874 data_size = test_data->use_external_mbufs ?
1875 test_data->inbuf_data_size :
/* Linearize possibly-chained output mbuf for a flat comparison */
1878 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1879 if (contig_buf == NULL) {
1880 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1885 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1886 ops_processed[i]->produced, contig_buf);
1887 if (compare_buffers(buf1, data_size,
1888 buf2, ops_processed[i]->produced) < 0)
1891 /* Test checksums */
1892 if (compress_xforms[0]->compress.chksum !=
1893 RTE_COMP_CHECKSUM_NONE) {
1894 if (ops_processed[i]->output_chksum !=
1895 compress_checksum[i]) {
1896 RTE_LOG(ERR, USER1, "The checksums differ\n"
1897 "Compression Checksum: %" PRIu64 "\tDecompression "
1898 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1899 ops_processed[i]->output_chksum);
1904 rte_free(contig_buf);
/* Error path also frees the scratch buffer */
1910 rte_free(contig_buf);
1915 * Compresses and decompresses input stream with compressdev API and Zlib API
1917 * Basic test function. Common for all the functional tests.
1918 * -1 returned if the function fails.
1921 * Interim data containing session/transformation objects.
1923 * The test parameters set by users (command line parameters).
1925 * - 1: Some operation not supported
/*
 * Main driver common to all functional tests: sets up buffers, runs the
 * compression stage, then the decompression stage (looping for stateful
 * decompression), validates results, and frees all resources on every path.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
1931 test_deflate_comp_decomp(const struct interim_data_params *int_data,
1932 const struct test_data_params *test_data)
1934 unsigned int num_bufs = int_data->num_bufs;
1935 unsigned int out_of_space = test_data->out_of_space;
1937 void *stream = NULL;
1938 char *all_decomp_data = NULL;
1939 unsigned int decomp_produced_data_size = 0;
1941 int ret_status = -1;
/* Per-buffer working arrays, bundled into test_priv_data below so the
 * stage helpers can share them */
1943 struct rte_mbuf *uncomp_bufs[num_bufs];
1944 struct rte_mbuf *comp_bufs[num_bufs];
1945 struct rte_comp_op *ops[num_bufs];
1946 struct rte_comp_op *ops_processed[num_bufs];
1947 void *priv_xforms[num_bufs];
1950 uint64_t compress_checksum[num_bufs];
1951 uint32_t compressed_data_size[num_bufs];
1952 char *contig_buf = NULL;
1954 struct rte_mbuf_ext_shared_info compbuf_info;
1955 struct rte_mbuf_ext_shared_info decompbuf_info;
1957 const struct rte_compressdev_capabilities *capa;
1959 /* Compressing with CompressDev */
1960 unsigned int zlib_compress =
1961 (test_data->zlib_dir == ZLIB_ALL ||
1962 test_data->zlib_dir == ZLIB_COMPRESS);
1963 unsigned int zlib_decompress =
1964 (test_data->zlib_dir == ZLIB_ALL ||
1965 test_data->zlib_dir == ZLIB_DECOMPRESS);
1967 struct test_private_arrays test_priv_data;
1969 test_priv_data.uncomp_bufs = uncomp_bufs;
1970 test_priv_data.comp_bufs = comp_bufs;
1971 test_priv_data.ops = ops;
1972 test_priv_data.ops_processed = ops_processed;
1973 test_priv_data.priv_xforms = priv_xforms;
1974 test_priv_data.compress_checksum = compress_checksum;
1975 test_priv_data.compressed_data_size = compressed_data_size;
1977 test_priv_data.stream = &stream;
1978 test_priv_data.all_decomp_data = &all_decomp_data;
1979 test_priv_data.decomp_produced_data_size = &decomp_produced_data_size;
1981 test_priv_data.num_priv_xforms = 0; /* it's used for decompression only */
1983 capa = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1986 "Compress device does not support DEFLATE\n");
1989 //test_objects_init(&test_priv_data, num_bufs);
1991 /* Prepare the source mbufs with the data */
1992 ret = test_setup_com_bufs(int_data, test_data, &test_priv_data);
2000 /* Prepare output (destination) mbufs for compressed data */
2001 ret = test_setup_output_bufs(
2002 OPERATION_COMPRESSION,
2003 out_of_space == 1 && !zlib_compress,
2013 /* Run compression */
2014 ret = test_deflate_comp_run(int_data, test_data, &test_priv_data);
2020 ret = test_deflate_comp_finalize(int_data, test_data, &test_priv_data);
2024 } else if (ret == 1) {
2027 } else if (ret == 2) {
2028 ret_status = 1; /* some operation not supported */
2034 /* Prepare output (destination) mbufs for decompressed data */
2035 ret = test_setup_output_bufs(
2036 OPERATION_DECOMPRESSION,
2037 out_of_space == 1 && !zlib_decompress,
2047 /* Run decompression */
2048 ret = test_deflate_decomp_run(int_data, test_data, &test_priv_data);
2054 if (!zlib_decompress) {
/* Stateful decompression re-enters here until all input is consumed */
2055 next_step: /* next step for stateful decompression only */
2056 ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs);
2060 "Decompression: enqueue/dequeue operation failed\n");
2064 ret = test_deflate_decomp_finalize(int_data, test_data, &test_priv_data);
2068 } else if (ret == 1) {
2071 } else if (ret == 2) {
2075 /* FINAL PROCESSING */
2077 ret = test_results_validation(int_data, test_data, &test_priv_data);
/* Common cleanup path: runs on both success and failure */
2085 /* Free resources */
2088 rte_compressdev_stream_free(0, stream);
2089 if (all_decomp_data != NULL)
2090 rte_free(all_decomp_data);
2092 /* Free compress private xforms */
2093 for (i = 0; i < test_priv_data.num_priv_xforms; i++) {
2094 if (priv_xforms[i] != NULL) {
2095 rte_compressdev_private_xform_free(0, priv_xforms[i]);
2096 priv_xforms[i] = NULL;
2100 for (i = 0; i < num_bufs; i++) {
2101 rte_pktmbuf_free(uncomp_bufs[i]);
2102 rte_pktmbuf_free(comp_bufs[i]);
2103 rte_comp_op_free(ops[i]);
2104 rte_comp_op_free(ops_processed[i]);
2106 rte_free(contig_buf);
/*
 * Stateless round-trip test using fixed Huffman coding: each test buffer is
 * compressed by the PMD and verified via zlib, then the reverse direction.
 * Skipped when the device lacks RTE_COMP_FF_HUFFMAN_FIXED.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
2112 test_compressdev_deflate_stateless_fixed(void)
2114 struct comp_testsuite_params *ts_params = &testsuite_params;
2117 const struct rte_compressdev_capabilities *capab;
2119 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2120 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2122 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2125 struct rte_comp_xform *compress_xform =
2126 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2128 if (compress_xform == NULL) {
2130 "Compress xform could not be created\n");
/* Start from the default xform, overriding only the Huffman mode */
2135 memcpy(compress_xform, ts_params->def_comp_xform,
2136 sizeof(struct rte_comp_xform));
2137 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
2139 struct interim_data_params int_data = {
2144 &ts_params->def_decomp_xform,
2148 struct test_data_params test_data = {
2149 .compress_state = RTE_COMP_OP_STATELESS,
2150 .decompress_state = RTE_COMP_OP_STATELESS,
2151 .buff_type = LB_BOTH,
2152 .zlib_dir = ZLIB_DECOMPRESS,
2155 .overflow = OVERFLOW_DISABLED
2158 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2159 int_data.test_bufs = &compress_test_bufs[i];
2160 int_data.buf_idx = &i;
2162 /* Compress with compressdev, decompress with Zlib */
2163 test_data.zlib_dir = ZLIB_DECOMPRESS;
2164 ret = test_deflate_comp_decomp(&int_data, &test_data);
2168 /* Compress with Zlib, decompress with compressdev */
2169 test_data.zlib_dir = ZLIB_COMPRESS;
2170 ret = test_deflate_comp_decomp(&int_data, &test_data);
2178 rte_free(compress_xform);
/*
 * Stateless round-trip test using dynamic Huffman coding: same structure as
 * the fixed-Huffman test, both PMD->zlib and zlib->PMD directions.
 * Skipped when the device lacks RTE_COMP_FF_HUFFMAN_DYNAMIC.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
2183 test_compressdev_deflate_stateless_dynamic(void)
2185 struct comp_testsuite_params *ts_params = &testsuite_params;
2188 struct rte_comp_xform *compress_xform =
2189 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2191 const struct rte_compressdev_capabilities *capab;
2193 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2194 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2196 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2199 if (compress_xform == NULL) {
2201 "Compress xform could not be created\n");
/* Start from the default xform, overriding only the Huffman mode */
2206 memcpy(compress_xform, ts_params->def_comp_xform,
2207 sizeof(struct rte_comp_xform));
2208 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
2210 struct interim_data_params int_data = {
2215 &ts_params->def_decomp_xform,
2219 struct test_data_params test_data = {
2220 .compress_state = RTE_COMP_OP_STATELESS,
2221 .decompress_state = RTE_COMP_OP_STATELESS,
2222 .buff_type = LB_BOTH,
2223 .zlib_dir = ZLIB_DECOMPRESS,
2226 .overflow = OVERFLOW_DISABLED
2229 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2230 int_data.test_bufs = &compress_test_bufs[i];
2231 int_data.buf_idx = &i;
2233 /* Compress with compressdev, decompress with Zlib */
2234 test_data.zlib_dir = ZLIB_DECOMPRESS;
2235 ret = test_deflate_comp_decomp(&int_data, &test_data);
2239 /* Compress with Zlib, decompress with compressdev */
2240 test_data.zlib_dir = ZLIB_COMPRESS;
2241 ret = test_deflate_comp_decomp(&int_data, &test_data);
2249 rte_free(compress_xform);
/*
 * Stateless test submitting all test buffers as one batch of operations,
 * exercising multi-op enqueue/dequeue in both PMD->zlib and zlib->PMD
 * directions with the default xforms.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
2254 test_compressdev_deflate_stateless_multi_op(void)
2256 struct comp_testsuite_params *ts_params = &testsuite_params;
2257 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
2258 uint16_t buf_idx[num_bufs];
2262 for (i = 0; i < num_bufs; i++)
2265 struct interim_data_params int_data = {
2269 &ts_params->def_comp_xform,
2270 &ts_params->def_decomp_xform,
2274 struct test_data_params test_data = {
2275 .compress_state = RTE_COMP_OP_STATELESS,
2276 .decompress_state = RTE_COMP_OP_STATELESS,
2277 .buff_type = LB_BOTH,
2278 .zlib_dir = ZLIB_DECOMPRESS,
2281 .overflow = OVERFLOW_DISABLED
2284 /* Compress with compressdev, decompress with Zlib */
2285 test_data.zlib_dir = ZLIB_DECOMPRESS;
2286 ret = test_deflate_comp_decomp(&int_data, &test_data);
2290 /* Compress with Zlib, decompress with compressdev */
2291 test_data.zlib_dir = ZLIB_COMPRESS;
2292 ret = test_deflate_comp_decomp(&int_data, &test_data);
2296 return TEST_SUCCESS;
/*
 * Stateless test sweeping every compression level from RTE_COMP_LEVEL_MIN
 * to RTE_COMP_LEVEL_MAX for each test buffer (PMD compress, zlib verify).
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
2300 test_compressdev_deflate_stateless_multi_level(void)
2302 struct comp_testsuite_params *ts_params = &testsuite_params;
2306 struct rte_comp_xform *compress_xform =
2307 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2309 if (compress_xform == NULL) {
2311 "Compress xform could not be created\n");
/* Start from the default xform; the level is overridden per iteration */
2316 memcpy(compress_xform, ts_params->def_comp_xform,
2317 sizeof(struct rte_comp_xform));
2319 struct interim_data_params int_data = {
2324 &ts_params->def_decomp_xform,
2328 struct test_data_params test_data = {
2329 .compress_state = RTE_COMP_OP_STATELESS,
2330 .decompress_state = RTE_COMP_OP_STATELESS,
2331 .buff_type = LB_BOTH,
2332 .zlib_dir = ZLIB_DECOMPRESS,
2335 .overflow = OVERFLOW_DISABLED
2338 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2339 int_data.test_bufs = &compress_test_bufs[i];
2340 int_data.buf_idx = &i;
2342 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
2344 compress_xform->compress.level = level;
2345 /* Compress with compressdev, decompress with Zlib */
2346 test_data.zlib_dir = ZLIB_DECOMPRESS;
2347 ret = test_deflate_comp_decomp(&int_data, &test_data);
2356 rte_free(compress_xform);
2360 #define NUM_XFORMS 3
/*
 * Stateless test with NUM_XFORMS distinct compress/decompress xform pairs
 * (varying compression level), all operating on the same input buffer, to
 * exercise multiple xforms within a single batch.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
2362 test_compressdev_deflate_stateless_multi_xform(void)
2364 struct comp_testsuite_params *ts_params = &testsuite_params;
2365 uint16_t num_bufs = NUM_XFORMS;
2366 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
2367 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
2368 const char *test_buffers[NUM_XFORMS];
2370 unsigned int level = RTE_COMP_LEVEL_MIN;
2371 uint16_t buf_idx[num_bufs];
2374 /* Create multiple xforms with various levels */
2375 for (i = 0; i < NUM_XFORMS; i++) {
2376 compress_xforms[i] = rte_malloc(NULL,
2377 sizeof(struct rte_comp_xform), 0);
2378 if (compress_xforms[i] == NULL) {
2380 "Compress xform could not be created\n");
2385 memcpy(compress_xforms[i], ts_params->def_comp_xform,
2386 sizeof(struct rte_comp_xform));
2387 compress_xforms[i]->compress.level = level;
2390 decompress_xforms[i] = rte_malloc(NULL,
2391 sizeof(struct rte_comp_xform), 0);
2392 if (decompress_xforms[i] == NULL) {
2394 "Decompress xform could not be created\n");
2399 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
2400 sizeof(struct rte_comp_xform));
2403 for (i = 0; i < NUM_XFORMS; i++) {
2405 /* Use the same buffer in all sessions */
2406 test_buffers[i] = compress_test_bufs[0];
2409 struct interim_data_params int_data = {
2418 struct test_data_params test_data = {
2419 .compress_state = RTE_COMP_OP_STATELESS,
2420 .decompress_state = RTE_COMP_OP_STATELESS,
2421 .buff_type = LB_BOTH,
2422 .zlib_dir = ZLIB_DECOMPRESS,
2425 .overflow = OVERFLOW_DISABLED
2428 /* Compress with compressdev, decompress with Zlib */
2429 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Cleanup path frees every allocated xform pair */
2436 for (i = 0; i < NUM_XFORMS; i++) {
2437 rte_free(compress_xforms[i]);
2438 rte_free(decompress_xforms[i]);
/*
 * Stateless scatter-gather test: runs the round trip with chained mbufs on
 * both sides (SGL_BOTH), then SGL_TO_LB and LB_TO_SGL variants when the
 * device advertises the corresponding OOP feature flags.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
2445 test_compressdev_deflate_stateless_sgl(void)
2447 struct comp_testsuite_params *ts_params = &testsuite_params;
2450 const struct rte_compressdev_capabilities *capab;
2452 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2453 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2455 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2458 struct interim_data_params int_data = {
2462 &ts_params->def_comp_xform,
2463 &ts_params->def_decomp_xform,
2467 struct test_data_params test_data = {
2468 .compress_state = RTE_COMP_OP_STATELESS,
2469 .decompress_state = RTE_COMP_OP_STATELESS,
2470 .buff_type = SGL_BOTH,
2471 .zlib_dir = ZLIB_DECOMPRESS,
2474 .overflow = OVERFLOW_DISABLED
2477 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2478 int_data.test_bufs = &compress_test_bufs[i];
2479 int_data.buf_idx = &i;
2481 /* Compress with compressdev, decompress with Zlib */
2482 test_data.zlib_dir = ZLIB_DECOMPRESS;
2483 ret = test_deflate_comp_decomp(&int_data, &test_data);
2487 /* Compress with Zlib, decompress with compressdev */
2488 test_data.zlib_dir = ZLIB_COMPRESS;
2489 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Chained input, linear output — only when the device supports it */
2493 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
2494 /* Compress with compressdev, decompress with Zlib */
2495 test_data.zlib_dir = ZLIB_DECOMPRESS;
2496 test_data.buff_type = SGL_TO_LB;
2497 ret = test_deflate_comp_decomp(&int_data, &test_data);
2501 /* Compress with Zlib, decompress with compressdev */
2502 test_data.zlib_dir = ZLIB_COMPRESS;
2503 test_data.buff_type = SGL_TO_LB;
2504 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Linear input, chained output — only when the device supports it */
2509 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
2510 /* Compress with compressdev, decompress with Zlib */
2511 test_data.zlib_dir = ZLIB_DECOMPRESS;
2512 test_data.buff_type = LB_TO_SGL;
2513 ret = test_deflate_comp_decomp(&int_data, &test_data);
2517 /* Compress with Zlib, decompress with compressdev */
2518 test_data.zlib_dir = ZLIB_COMPRESS;
2519 test_data.buff_type = LB_TO_SGL;
2520 ret = test_deflate_comp_decomp(&int_data, &test_data);
2526 return TEST_SUCCESS;
/*
 * Stateless checksum test: for each checksum type the device supports
 * (CRC32, Adler32, combined CRC32+Adler32), verifies the PMD's checksum
 * against zlib's and against the device's own compress/decompress pair.
 * Skipped entirely when the device supports no checksum at all.
 * NOTE(review): interior lines of this function are missing from this excerpt;
 * code is left byte-identical.
 */
2530 test_compressdev_deflate_stateless_checksum(void)
2532 struct comp_testsuite_params *ts_params = &testsuite_params;
2535 const struct rte_compressdev_capabilities *capab;
2537 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2538 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2540 /* Check if driver supports any checksum */
2541 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
2542 (capab->comp_feature_flags &
2543 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
2544 (capab->comp_feature_flags &
2545 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
2548 struct rte_comp_xform *compress_xform =
2549 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2550 if (compress_xform == NULL) {
2551 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2555 memcpy(compress_xform, ts_params->def_comp_xform,
2556 sizeof(struct rte_comp_xform));
2558 struct rte_comp_xform *decompress_xform =
2559 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2560 if (decompress_xform == NULL) {
2561 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2562 rte_free(compress_xform);
2566 memcpy(decompress_xform, ts_params->def_decomp_xform,
2567 sizeof(struct rte_comp_xform));
2569 struct interim_data_params int_data = {
2578 struct test_data_params test_data = {
2579 .compress_state = RTE_COMP_OP_STATELESS,
2580 .decompress_state = RTE_COMP_OP_STATELESS,
2581 .buff_type = LB_BOTH,
2582 .zlib_dir = ZLIB_DECOMPRESS,
2585 .overflow = OVERFLOW_DISABLED
2588 /* Check if driver supports crc32 checksum and test */
2589 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
2590 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2591 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2593 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2594 /* Compress with compressdev, decompress with Zlib */
2595 int_data.test_bufs = &compress_test_bufs[i];
2596 int_data.buf_idx = &i;
2598 /* Generate zlib checksum and test against selected
2599 * drivers decompression checksum
2601 test_data.zlib_dir = ZLIB_COMPRESS;
2602 ret = test_deflate_comp_decomp(&int_data, &test_data);
2606 /* Generate compression and decompression
2607 * checksum of selected driver
2609 test_data.zlib_dir = ZLIB_NONE;
2610 ret = test_deflate_comp_decomp(&int_data, &test_data);
2616 /* Check if driver supports adler32 checksum and test */
2617 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
2618 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2619 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2621 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2622 int_data.test_bufs = &compress_test_bufs[i];
2623 int_data.buf_idx = &i;
2625 /* Generate zlib checksum and test against selected
2626 * drivers decompression checksum
2628 test_data.zlib_dir = ZLIB_COMPRESS;
2629 ret = test_deflate_comp_decomp(&int_data, &test_data);
2632 /* Generate compression and decompression
2633 * checksum of selected driver
2635 test_data.zlib_dir = ZLIB_NONE;
2636 ret = test_deflate_comp_decomp(&int_data, &test_data);
2642 /* Check if driver supports combined crc and adler checksum and test */
2643 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
2644 compress_xform->compress.chksum =
2645 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2646 decompress_xform->decompress.chksum =
2647 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2649 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2650 int_data.test_bufs = &compress_test_bufs[i];
2651 int_data.buf_idx = &i;
/* Combined checksum has no zlib equivalent: device-only round trip */
2653 /* Generate compression and decompression
2654 * checksum of selected driver
2656 test_data.zlib_dir = ZLIB_NONE;
2657 ret = test_deflate_comp_decomp(&int_data, &test_data);
2666 rte_free(compress_xform);
2667 rte_free(decompress_xform);
2672 test_compressdev_out_of_space_buffer(void)
2674 struct comp_testsuite_params *ts_params = &testsuite_params;
2677 const struct rte_compressdev_capabilities *capab;
2679 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
2681 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2682 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2684 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2687 struct interim_data_params int_data = {
2688 &compress_test_bufs[0],
2691 &ts_params->def_comp_xform,
2692 &ts_params->def_decomp_xform,
2696 struct test_data_params test_data = {
2697 .compress_state = RTE_COMP_OP_STATELESS,
2698 .decompress_state = RTE_COMP_OP_STATELESS,
2699 .buff_type = LB_BOTH,
2700 .zlib_dir = ZLIB_DECOMPRESS,
2701 .out_of_space = 1, /* run out-of-space test */
2703 .overflow = OVERFLOW_DISABLED
2705 /* Compress with compressdev, decompress with Zlib */
2706 test_data.zlib_dir = ZLIB_DECOMPRESS;
2707 ret = test_deflate_comp_decomp(&int_data, &test_data);
2711 /* Compress with Zlib, decompress with compressdev */
2712 test_data.zlib_dir = ZLIB_COMPRESS;
2713 ret = test_deflate_comp_decomp(&int_data, &test_data);
2717 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2718 /* Compress with compressdev, decompress with Zlib */
2719 test_data.zlib_dir = ZLIB_DECOMPRESS;
2720 test_data.buff_type = SGL_BOTH;
2721 ret = test_deflate_comp_decomp(&int_data, &test_data);
2725 /* Compress with Zlib, decompress with compressdev */
2726 test_data.zlib_dir = ZLIB_COMPRESS;
2727 test_data.buff_type = SGL_BOTH;
2728 ret = test_deflate_comp_decomp(&int_data, &test_data);
2740 test_compressdev_deflate_stateless_dynamic_big(void)
2742 struct comp_testsuite_params *ts_params = &testsuite_params;
2746 const struct rte_compressdev_capabilities *capab;
2747 char *test_buffer = NULL;
2749 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2750 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2752 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2755 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2758 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
2759 if (test_buffer == NULL) {
2761 "Can't allocate buffer for big-data\n");
2765 struct interim_data_params int_data = {
2766 (const char * const *)&test_buffer,
2769 &ts_params->def_comp_xform,
2770 &ts_params->def_decomp_xform,
2774 struct test_data_params test_data = {
2775 .compress_state = RTE_COMP_OP_STATELESS,
2776 .decompress_state = RTE_COMP_OP_STATELESS,
2777 .buff_type = SGL_BOTH,
2778 .zlib_dir = ZLIB_DECOMPRESS,
2781 .overflow = OVERFLOW_DISABLED
2784 ts_params->def_comp_xform->compress.deflate.huffman =
2785 RTE_COMP_HUFFMAN_DYNAMIC;
2787 /* fill the buffer with data based on rand. data */
2788 srand(BIG_DATA_TEST_SIZE);
2789 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2790 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2791 test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
2793 /* Compress with compressdev, decompress with Zlib */
2794 test_data.zlib_dir = ZLIB_DECOMPRESS;
2795 ret = test_deflate_comp_decomp(&int_data, &test_data);
2799 /* Compress with Zlib, decompress with compressdev */
2800 test_data.zlib_dir = ZLIB_COMPRESS;
2801 ret = test_deflate_comp_decomp(&int_data, &test_data);
2808 ts_params->def_comp_xform->compress.deflate.huffman =
2809 RTE_COMP_HUFFMAN_DEFAULT;
2810 rte_free(test_buffer);
2815 test_compressdev_deflate_stateful_decomp(void)
2817 struct comp_testsuite_params *ts_params = &testsuite_params;
2820 const struct rte_compressdev_capabilities *capab;
2822 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2823 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2825 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2828 struct interim_data_params int_data = {
2829 &compress_test_bufs[0],
2832 &ts_params->def_comp_xform,
2833 &ts_params->def_decomp_xform,
2837 struct test_data_params test_data = {
2838 .compress_state = RTE_COMP_OP_STATELESS,
2839 .decompress_state = RTE_COMP_OP_STATEFUL,
2840 .buff_type = LB_BOTH,
2841 .zlib_dir = ZLIB_COMPRESS,
2844 .decompress_output_block_size = 2000,
2845 .decompress_steps_max = 4,
2846 .overflow = OVERFLOW_DISABLED
2849 /* Compress with Zlib, decompress with compressdev */
2850 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2855 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2856 /* Now test with SGL buffers */
2857 test_data.buff_type = SGL_BOTH;
2858 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2871 test_compressdev_deflate_stateful_decomp_checksum(void)
2873 struct comp_testsuite_params *ts_params = &testsuite_params;
2876 const struct rte_compressdev_capabilities *capab;
2878 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2879 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2881 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2884 /* Check if driver supports any checksum */
2885 if (!(capab->comp_feature_flags &
2886 (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
2887 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
2890 struct rte_comp_xform *compress_xform =
2891 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2892 if (compress_xform == NULL) {
2893 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2897 memcpy(compress_xform, ts_params->def_comp_xform,
2898 sizeof(struct rte_comp_xform));
2900 struct rte_comp_xform *decompress_xform =
2901 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2902 if (decompress_xform == NULL) {
2903 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2904 rte_free(compress_xform);
2908 memcpy(decompress_xform, ts_params->def_decomp_xform,
2909 sizeof(struct rte_comp_xform));
2911 struct interim_data_params int_data = {
2912 &compress_test_bufs[0],
2920 struct test_data_params test_data = {
2921 .compress_state = RTE_COMP_OP_STATELESS,
2922 .decompress_state = RTE_COMP_OP_STATEFUL,
2923 .buff_type = LB_BOTH,
2924 .zlib_dir = ZLIB_COMPRESS,
2927 .decompress_output_block_size = 2000,
2928 .decompress_steps_max = 4,
2929 .overflow = OVERFLOW_DISABLED
2932 /* Check if driver supports crc32 checksum and test */
2933 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
2934 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2935 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2936 /* Compress with Zlib, decompress with compressdev */
2937 test_data.buff_type = LB_BOTH;
2938 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2942 if (capab->comp_feature_flags &
2943 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2944 /* Now test with SGL buffers */
2945 test_data.buff_type = SGL_BOTH;
2946 if (test_deflate_comp_decomp(&int_data,
2954 /* Check if driver supports adler32 checksum and test */
2955 if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
2956 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2957 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2958 /* Compress with Zlib, decompress with compressdev */
2959 test_data.buff_type = LB_BOTH;
2960 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2964 if (capab->comp_feature_flags &
2965 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2966 /* Now test with SGL buffers */
2967 test_data.buff_type = SGL_BOTH;
2968 if (test_deflate_comp_decomp(&int_data,
2976 /* Check if driver supports combined crc and adler checksum and test */
2977 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
2978 compress_xform->compress.chksum =
2979 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2980 decompress_xform->decompress.chksum =
2981 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2982 /* Zlib doesn't support combined checksum */
2983 test_data.zlib_dir = ZLIB_NONE;
2984 /* Compress stateless, decompress stateful with compressdev */
2985 test_data.buff_type = LB_BOTH;
2986 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2990 if (capab->comp_feature_flags &
2991 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2992 /* Now test with SGL buffers */
2993 test_data.buff_type = SGL_BOTH;
2994 if (test_deflate_comp_decomp(&int_data,
3005 rte_free(compress_xform);
3006 rte_free(decompress_xform);
3010 static const struct rte_memzone *
3011 make_memzone(const char *name, size_t size)
3013 unsigned int socket_id = rte_socket_id();
3014 char mz_name[RTE_MEMZONE_NAMESIZE];
3015 const struct rte_memzone *memzone;
3017 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id);
3018 memzone = rte_memzone_lookup(mz_name);
3019 if (memzone != NULL && memzone->len != size) {
3020 rte_memzone_free(memzone);
3023 if (memzone == NULL) {
3024 memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id,
3025 RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
3026 if (memzone == NULL)
3027 RTE_LOG(ERR, USER1, "Can't allocate memory zone %s",
3034 test_compressdev_external_mbufs(void)
3036 struct comp_testsuite_params *ts_params = &testsuite_params;
3037 size_t data_len = 0;
3039 int ret = TEST_FAILED;
3041 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
3042 data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1);
3044 struct interim_data_params int_data = {
3048 &ts_params->def_comp_xform,
3049 &ts_params->def_decomp_xform,
3053 struct test_data_params test_data = {
3054 .compress_state = RTE_COMP_OP_STATELESS,
3055 .decompress_state = RTE_COMP_OP_STATELESS,
3056 .buff_type = LB_BOTH,
3057 .zlib_dir = ZLIB_DECOMPRESS,
3060 .use_external_mbufs = 1,
3061 .inbuf_data_size = data_len,
3062 .inbuf_memzone = make_memzone("inbuf", data_len),
3063 .compbuf_memzone = make_memzone("compbuf", data_len *
3064 COMPRESS_BUF_SIZE_RATIO),
3065 .uncompbuf_memzone = make_memzone("decompbuf", data_len),
3066 .overflow = OVERFLOW_DISABLED
3069 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
3070 /* prepare input data */
3071 data_len = strlen(compress_test_bufs[i]) + 1;
3072 rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i],
3074 test_data.inbuf_data_size = data_len;
3075 int_data.buf_idx = &i;
3077 /* Compress with compressdev, decompress with Zlib */
3078 test_data.zlib_dir = ZLIB_DECOMPRESS;
3079 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
3082 /* Compress with Zlib, decompress with compressdev */
3083 test_data.zlib_dir = ZLIB_COMPRESS;
3084 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
3091 rte_memzone_free(test_data.inbuf_memzone);
3092 rte_memzone_free(test_data.compbuf_memzone);
3093 rte_memzone_free(test_data.uncompbuf_memzone);
3098 test_compressdev_deflate_stateless_fixed_oos_recoverable(void)
3100 struct comp_testsuite_params *ts_params = &testsuite_params;
3104 const struct rte_compressdev_capabilities *capab;
3106 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3107 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3109 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
3112 struct rte_comp_xform *compress_xform =
3113 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
3115 if (compress_xform == NULL) {
3117 "Compress xform could not be created\n");
3122 memcpy(compress_xform, ts_params->def_comp_xform,
3123 sizeof(struct rte_comp_xform));
3124 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
3126 struct interim_data_params int_data = {
3131 &ts_params->def_decomp_xform,
3135 struct test_data_params test_data = {
3136 .compress_state = RTE_COMP_OP_STATELESS,
3137 .decompress_state = RTE_COMP_OP_STATELESS,
3138 .buff_type = LB_BOTH,
3139 .zlib_dir = ZLIB_DECOMPRESS,
3142 .overflow = OVERFLOW_ENABLED
3145 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
3146 int_data.test_bufs = &compress_test_bufs[i];
3147 int_data.buf_idx = &i;
3149 /* Compress with compressdev, decompress with Zlib */
3150 test_data.zlib_dir = ZLIB_DECOMPRESS;
3151 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
3152 if (comp_result < 0) {
3155 } else if (comp_result > 0) {
3160 /* Compress with Zlib, decompress with compressdev */
3161 test_data.zlib_dir = ZLIB_COMPRESS;
3162 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
3163 if (comp_result < 0) {
3166 } else if (comp_result > 0) {
3175 rte_free(compress_xform);
3179 static struct unit_test_suite compressdev_testsuite = {
3180 .suite_name = "compressdev unit test suite",
3181 .setup = testsuite_setup,
3182 .teardown = testsuite_teardown,
3183 .unit_test_cases = {
3184 TEST_CASE_ST(NULL, NULL,
3185 test_compressdev_invalid_configuration),
3186 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3187 test_compressdev_deflate_stateless_fixed),
3188 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3189 test_compressdev_deflate_stateless_dynamic),
3190 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3191 test_compressdev_deflate_stateless_dynamic_big),
3192 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3193 test_compressdev_deflate_stateless_multi_op),
3194 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3195 test_compressdev_deflate_stateless_multi_level),
3196 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3197 test_compressdev_deflate_stateless_multi_xform),
3198 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3199 test_compressdev_deflate_stateless_sgl),
3200 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3201 test_compressdev_deflate_stateless_checksum),
3202 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3203 test_compressdev_out_of_space_buffer),
3204 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3205 test_compressdev_deflate_stateful_decomp),
3206 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3207 test_compressdev_deflate_stateful_decomp_checksum),
3208 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3209 test_compressdev_external_mbufs),
3210 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3211 test_compressdev_deflate_stateless_fixed_oos_recoverable),
3212 TEST_CASES_END() /**< NULL terminate unit test array */
3217 test_compressdev(void)
3219 return unit_test_suite_runner(&compressdev_testsuite);
3222 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);