1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 - 2019 Intel Corporation
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
17 #include "test_compressdev_test_buffer.h"
20 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
28 * 30% extra size for compressed data compared to original data,
29 * in case data size cannot be reduced and it is actually bigger
30 * due to the compress block headers
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2
34 #define NUM_LARGE_MBUFS 16
35 #define SMALL_SEG_SIZE 256
38 #define NUM_MAX_XFORMS 16
39 #define NUM_MAX_INFLIGHT_OPS 128
42 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
43 #define ZLIB_HEADER_SIZE 2
44 #define ZLIB_TRAILER_SIZE 4
45 #define GZIP_HEADER_SIZE 10
46 #define GZIP_TRAILER_SIZE 8
48 #define OUT_OF_SPACE_BUF 1
50 #define MAX_MBUF_SEGMENT_SIZE 65535
51 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
52 #define NUM_BIG_MBUFS 4
53 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
/* Human-readable names for enum rte_comp_huffman, used in debug log output. */
56 huffman_type_strings[] = {
57 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
58 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
59 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Source/destination mbuf layout combinations exercised by the tests
 * (LB = linear buffer, SGL = scatter-gather / chained mbuf). */
70 LB_BOTH = 0, /* both input and output are linear */
71 SGL_BOTH, /* both input and output are chained */
72 SGL_TO_LB, /* input buffer is chained */
73 LB_TO_SGL /* output buffer is chained */
/* Suite-wide resources, created once in testsuite_setup() and released
 * in testsuite_teardown(). */
85 struct comp_testsuite_params {
86 struct rte_mempool *large_mbuf_pool; /* single-segment bufs sized from largest test string */
87 struct rte_mempool *small_mbuf_pool; /* SMALL_SEG_SIZE bufs for SGL (chained) tests */
88 struct rte_mempool *big_mbuf_pool; /* MAX_MBUF_SEGMENT_SIZE bufs for big-data SGL tests */
89 struct rte_mempool *op_pool; /* rte_comp_op pool with priv_op_data private area */
90 struct rte_comp_xform *def_comp_xform; /* default DEFLATE compress xform */
91 struct rte_comp_xform *def_decomp_xform; /* default DEFLATE decompress xform */
/* Input data for one test case: the plaintext buffers plus the compress and
 * decompress xforms to apply to them (xforms are cycled i % num_xforms). */
94 struct interim_data_params {
95 const char * const *test_bufs; /* NUL-terminated test strings */
96 unsigned int num_bufs;
98 struct rte_comp_xform **compress_xforms;
99 struct rte_comp_xform **decompress_xforms;
100 unsigned int num_xforms;
/* Per-run knobs: stateless vs stateful ops, buffer layout, which side (if
 * any) is handled by zlib instead of the PMD, and special-case toggles. */
103 struct test_data_params {
104 enum rte_comp_op_type compress_state;
105 enum rte_comp_op_type decompress_state;
106 enum varied_buff buff_type; /* LB/SGL combination, see varied_buff */
107 enum zlib_direction zlib_dir; /* which direction(s) use zlib directly */
108 unsigned int out_of_space; /* non-zero: force tiny dst buf (OUT_OF_SPACE_BUF) */
109 unsigned int big_data; /* non-zero: use big_mbuf_pool sized buffers */
110 /* stateful decompression specific parameters */
111 unsigned int decompress_output_block_size;
112 unsigned int decompress_steps_max;
113 /* external mbufs specific parameters */
114 unsigned int use_external_mbufs; /* attach memzones via rte_pktmbuf_attach_extbuf */
115 unsigned int inbuf_data_size;
116 const struct rte_memzone *inbuf_memzone;
117 const struct rte_memzone *compbuf_memzone;
118 const struct rte_memzone *uncompbuf_memzone;
119 /* overflow test activation */
120 enum overflow_test overflow;
/* Zero-initialized at load time; populated by testsuite_setup(). */
123 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite-level teardown: warn if any mempool still has outstanding
 * mbufs/ops (i.e. a test case leaked them), then free all four pools
 * and the two default xforms.
 */
126 testsuite_teardown(void)
128 struct comp_testsuite_params *ts_params = &testsuite_params;
/* A non-zero in-use count means some test did not free its buffers. */
130 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
131 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
132 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
133 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
134 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
135 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
136 if (rte_mempool_in_use_count(ts_params->op_pool))
137 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
/* rte_mempool_free()/rte_free() accept NULL, so this is safe even if
 * testsuite_setup() failed part-way through. */
139 rte_mempool_free(ts_params->large_mbuf_pool)
140 rte_mempool_free(ts_params->small_mbuf_pool);
141 rte_mempool_free(ts_params->big_mbuf_pool);
142 rte_mempool_free(ts_params->op_pool);
143 rte_free(ts_params->def_comp_xform);
144 rte_free(ts_params->def_decomp_xform);
/*
 * Suite-level setup: requires at least one compressdev; sizes the large
 * mbuf pool from the longest entry in compress_test_bufs (scaled by
 * COMPRESS_BUF_SIZE_RATIO), creates the small/big SGL pools and the op
 * pool, then allocates and fills the default DEFLATE xforms.
 */
148 testsuite_setup(void)
150 struct comp_testsuite_params *ts_params = &testsuite_params;
151 uint32_t max_buf_size = 0;
154 if (rte_compressdev_count() == 0) {
155 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
/* All tests in this file run against device id 0. */
159 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
160 rte_compressdev_name_get(0));
/* +1 keeps room for the NUL terminator, which the tests copy as data. */
162 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
163 max_buf_size = RTE_MAX(max_buf_size,
164 strlen(compress_test_bufs[i]) + 1);
167 * Buffers to be used in compression and decompression.
168 * Since decompressed data might be larger than
169 * compressed data (due to block header),
170 * buffers should be big enough for both cases.
172 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
173 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
176 max_buf_size + RTE_PKTMBUF_HEADROOM,
178 if (ts_params->large_mbuf_pool == NULL) {
179 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
183 /* Create mempool with smaller buffers for SGL testing */
184 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
185 NUM_LARGE_MBUFS * MAX_SEGS,
187 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
189 if (ts_params->small_mbuf_pool == NULL) {
190 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
194 /* Create mempool with big buffers for SGL testing */
195 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
198 MAX_MBUF_SEGMENT_SIZE,
200 if (ts_params->big_mbuf_pool == NULL) {
201 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
/* Private area per op carries struct priv_op_data (original buffer index). */
205 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
206 0, sizeof(struct priv_op_data),
208 if (ts_params->op_pool == NULL) {
209 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
213 ts_params->def_comp_xform =
214 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
215 if (ts_params->def_comp_xform == NULL) {
217 "Default compress xform could not be created\n");
220 ts_params->def_decomp_xform =
221 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
222 if (ts_params->def_decomp_xform == NULL) {
224 "Default decompress xform could not be created\n");
228 /* Initializes default values for compress/decompress xforms */
229 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing ',' below is the comma operator chaining into the
 * next assignment — behavior is unchanged, but ';' was almost certainly
 * intended. Same pattern on the decompress.algo line further down. */
230 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
231 ts_params->def_comp_xform->compress.deflate.huffman =
232 RTE_COMP_HUFFMAN_DEFAULT;
233 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
234 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
235 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
237 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
238 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
239 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
240 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* On any failure path above, release whatever was already allocated. */
245 testsuite_teardown();
/* Per-test setup: configure compressdev 0 with a single queue pair
 * (NUM_MAX_INFLIGHT_OPS descriptors) on the local socket and start it. */
251 generic_ut_setup(void)
253 /* Configure compressdev (one device, one queue pair) */
254 struct rte_compressdev_config config = {
255 .socket_id = rte_socket_id(),
257 .max_nb_priv_xforms = NUM_MAX_XFORMS,
261 if (rte_compressdev_configure(0, &config) < 0) {
262 RTE_LOG(ERR, USER1, "Device configuration failed\n");
266 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
267 rte_socket_id()) < 0) {
268 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
272 if (rte_compressdev_start(0) < 0) {
273 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close compressdev 0. */
281 generic_ut_teardown(void)
283 rte_compressdev_stop(0);
284 if (rte_compressdev_close(0) < 0)
285 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative-path test: rte_compressdev_configure() and
 * rte_compressdev_queue_pair_setup() must reject invalid configurations
 * (zero queue pairs, more queue pairs than the device maximum, and a
 * queue-pair setup on an unconfigured device).
 */
289 test_compressdev_invalid_configuration(void)
291 struct rte_compressdev_config invalid_config;
292 struct rte_compressdev_config valid_config = {
293 .socket_id = rte_socket_id(),
295 .max_nb_priv_xforms = NUM_MAX_XFORMS,
298 struct rte_compressdev_info dev_info;
300 /* Invalid configuration with 0 queue pairs */
301 memcpy(&invalid_config, &valid_config,
302 sizeof(struct rte_compressdev_config));
303 invalid_config.nb_queue_pairs = 0;
305 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
306 "Device configuration was successful "
307 "with no queue pairs (invalid)\n");
310 * Invalid configuration with too many queue pairs
311 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit", so the check is skipped then. */
313 rte_compressdev_info_get(0, &dev_info);
314 if (dev_info.max_nb_queue_pairs != 0) {
315 memcpy(&invalid_config, &valid_config,
316 sizeof(struct rte_compressdev_config));
317 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
319 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
320 "Device configuration was successful "
321 "with too many queue pairs (invalid)\n");
324 /* Invalid queue pair setup, with no number of queue pairs set */
325 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
326 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
327 "Queue pair setup was successful "
328 "with no queue pairs set (invalid)\n");
/* Byte-wise comparison helper: the two buffers must have equal lengths
 * and equal contents; logs which check failed. */
334 compare_buffers(const char *buffer1, uint32_t buffer1_len,
335 const char *buffer2, uint32_t buffer2_len)
337 if (buffer1_len != buffer2_len) {
338 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
342 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
343 RTE_LOG(ERR, USER1, "Buffers are different\n");
351 * Maps compressdev and Zlib flush flags
354 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
/* Each compressdev flush flag has a direct zlib counterpart. */
357 case RTE_COMP_FLUSH_NONE:
359 case RTE_COMP_FLUSH_SYNC:
361 case RTE_COMP_FLUSH_FULL:
363 case RTE_COMP_FLUSH_FINAL:
366 * There should be only the values above,
367 * so this should never happen
/*
 * Reference compression path: deflate op->m_src into op->m_dst using zlib
 * directly, so results can be cross-checked against the compressdev PMD.
 * mem_level is forwarded to deflateInit2(). Chained (multi-segment) mbufs
 * are flattened through temporary linear buffers, since zlib requires
 * contiguous memory. On success fills op->consumed, op->produced,
 * op->status and op->output_chksum.
 */
375 compress_zlib(struct rte_comp_op *op,
376 const struct rte_comp_xform *xform, int mem_level)
380 int strategy, window_bits, comp_level;
381 int ret = TEST_FAILED;
382 uint8_t *single_src_buf = NULL;
383 uint8_t *single_dst_buf = NULL;
385 /* initialize zlib stream */
386 stream.zalloc = Z_NULL;
387 stream.zfree = Z_NULL;
388 stream.opaque = Z_NULL;
390 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
393 strategy = Z_DEFAULT_STRATEGY;
396 * Window bits is the base two logarithm of the window size (in bytes).
397 * When doing raw DEFLATE, this number will be negative.
399 window_bits = -(xform->compress.window_size);
/* Checksummed modes use zlib's positive window_bits conventions:
 * zlib wrapper for ADLER32, gzip wrapper (windowBits + 16 range) for CRC32. */
400 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
402 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
403 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
405 comp_level = xform->compress.level;
407 if (comp_level != RTE_COMP_LEVEL_NONE)
408 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
409 window_bits, mem_level, strategy);
411 ret = deflateInit(&stream, Z_NO_COMPRESSION);
414 printf("Zlib deflate could not be initialized\n");
418 /* Assuming stateless operation */
/* Chained source mbuf: flatten it first, zlib needs contiguous input. */
420 if (op->m_src->nb_segs > 1) {
421 single_src_buf = rte_malloc(NULL,
422 rte_pktmbuf_pkt_len(op->m_src), 0);
423 if (single_src_buf == NULL) {
424 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
428 if (rte_pktmbuf_read(op->m_src, op->src.offset,
429 rte_pktmbuf_pkt_len(op->m_src) -
431 single_src_buf) == NULL) {
433 "Buffer could not be read entirely\n");
437 stream.avail_in = op->src.length;
438 stream.next_in = single_src_buf;
441 stream.avail_in = op->src.length;
442 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
/* Chained destination: deflate into a temporary linear buffer, scatter
 * it back into the SGL after compression finishes. */
446 if (op->m_dst->nb_segs > 1) {
448 single_dst_buf = rte_malloc(NULL,
449 rte_pktmbuf_pkt_len(op->m_dst), 0);
450 if (single_dst_buf == NULL) {
452 "Buffer could not be allocated\n");
456 stream.avail_out = op->m_dst->pkt_len;
457 stream.next_out = single_dst_buf;
459 } else {/* linear output */
460 stream.avail_out = op->m_dst->data_len;
461 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
465 /* Stateless operation, all buffer will be compressed in one go */
466 zlib_flush = map_zlib_flush_flag(op->flush_flag);
467 ret = deflate(&stream, zlib_flush);
469 if (stream.avail_in != 0) {
470 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
474 if (ret != Z_STREAM_END)
477 /* Copy data to destination SGL */
478 if (op->m_dst->nb_segs > 1) {
479 uint32_t remaining_data = stream.total_out;
480 uint8_t *src_data = single_dst_buf;
481 struct rte_mbuf *dst_buf = op->m_dst;
483 while (remaining_data > 0) {
484 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
485 uint8_t *, op->dst.offset);
/* Last (partial) segment: copy the tail and stop. */
487 if (remaining_data < dst_buf->data_len) {
488 memcpy(dst_data, src_data, remaining_data);
491 memcpy(dst_data, src_data, dst_buf->data_len);
492 remaining_data -= dst_buf->data_len;
493 src_data += dst_buf->data_len;
494 dst_buf = dst_buf->next;
499 op->consumed = stream.total_in;
/* zlib wrapped the output (zlib header/trailer for ADLER32, gzip for
 * CRC32); strip the wrapper so the mbuf and op->produced reflect only
 * the deflate payload, matching compressdev semantics. */
500 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
501 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
502 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
503 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
505 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
506 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
507 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
508 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
511 op->produced = stream.total_out;
513 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* stream.adler holds the running adler32/crc32, per zlib's deflate docs. */
514 op->output_chksum = stream.adler;
516 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so these are safe on every exit path. */
521 rte_free(single_src_buf);
522 rte_free(single_dst_buf);
/*
 * Reference decompression path: inflate op->m_src into op->m_dst using
 * zlib directly (raw DEFLATE, negative window_bits). Chained mbufs are
 * flattened through temporary linear buffers. On success fills
 * op->consumed, op->produced and op->status.
 */
528 decompress_zlib(struct rte_comp_op *op,
529 const struct rte_comp_xform *xform)
534 int ret = TEST_FAILED;
535 uint8_t *single_src_buf = NULL;
536 uint8_t *single_dst_buf = NULL;
538 /* initialize zlib stream */
539 stream.zalloc = Z_NULL;
540 stream.zfree = Z_NULL;
541 stream.opaque = Z_NULL;
544 * Window bits is the base two logarithm of the window size (in bytes).
545 * When doing raw DEFLATE, this number will be negative.
547 window_bits = -(xform->decompress.window_size);
548 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is inflateInit2() —
 * misleading log text (runtime string, left unchanged here). */
551 printf("Zlib deflate could not be initialized\n");
555 /* Assuming stateless operation */
/* Chained source: flatten both src and dst through linear scratch bufs. */
557 if (op->m_src->nb_segs > 1) {
558 single_src_buf = rte_malloc(NULL,
559 rte_pktmbuf_pkt_len(op->m_src), 0);
560 if (single_src_buf == NULL) {
561 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
564 single_dst_buf = rte_malloc(NULL,
565 rte_pktmbuf_pkt_len(op->m_dst), 0);
566 if (single_dst_buf == NULL) {
567 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
570 if (rte_pktmbuf_read(op->m_src, 0,
571 rte_pktmbuf_pkt_len(op->m_src),
572 single_src_buf) == NULL) {
574 "Buffer could not be read entirely\n");
578 stream.avail_in = op->src.length;
579 stream.next_in = single_src_buf;
580 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
581 stream.next_out = single_dst_buf;
584 stream.avail_in = op->src.length;
585 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
586 stream.avail_out = op->m_dst->data_len;
587 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
590 /* Stateless operation, all buffer will be compressed in one go */
591 zlib_flush = map_zlib_flush_flag(op->flush_flag);
592 ret = inflate(&stream, zlib_flush);
594 if (stream.avail_in != 0) {
595 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
599 if (ret != Z_STREAM_END)
/* Scatter the inflated data from the scratch buffer back into the SGL. */
602 if (op->m_src->nb_segs > 1) {
603 uint32_t remaining_data = stream.total_out;
604 uint8_t *src_data = single_dst_buf;
605 struct rte_mbuf *dst_buf = op->m_dst;
607 while (remaining_data > 0) {
608 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
/* Last (partial) segment: copy the tail and stop. */
611 if (remaining_data < dst_buf->data_len) {
612 memcpy(dst_data, src_data, remaining_data);
615 memcpy(dst_data, src_data, dst_buf->data_len);
616 remaining_data -= dst_buf->data_len;
617 src_data += dst_buf->data_len;
618 dst_buf = dst_buf->next;
623 op->consumed = stream.total_in;
624 op->produced = stream.total_out;
625 op->status = RTE_COMP_OP_STATUS_SUCCESS;
627 inflateReset(&stream);
/*
 * Build a chained (SGL) mbuf holding total_data_size bytes on top of
 * head_buf, split into seg_size segments drawn from small_mbuf_pool
 * (large_mbuf_pool only for an oversized final segment). If test_buf is
 * non-NULL its contents are copied in; otherwise segments are only sized.
 * limit_segs_in_sgl, when non-zero, caps the number of segments, making
 * the final segment absorb the remainder.
 */
637 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
638 uint32_t total_data_size,
639 struct rte_mempool *small_mbuf_pool,
640 struct rte_mempool *large_mbuf_pool,
641 uint8_t limit_segs_in_sgl,
644 uint32_t remaining_data = total_data_size;
645 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
646 struct rte_mempool *pool;
647 struct rte_mbuf *next_seg;
650 const char *data_ptr = test_buf;
/* Cap segment count; "- 1" accounts for head_buf being the first segment. */
654 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
655 num_remaining_segs = limit_segs_in_sgl - 1;
658 * Allocate data in the first segment (header) and
659 * copy data if test buffer is provided
661 if (remaining_data < seg_size)
662 data_size = remaining_data;
664 data_size = seg_size;
665 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
666 if (buf_ptr == NULL) {
668 "Not enough space in the 1st buffer\n");
672 if (data_ptr != NULL) {
673 /* Copy characters without NULL terminator */
674 strncpy(buf_ptr, data_ptr, data_size);
675 data_ptr += data_size;
677 remaining_data -= data_size;
678 num_remaining_segs--;
681 * Allocate the rest of the segments,
682 * copy the rest of the data and chain the segments.
684 for (i = 0; i < num_remaining_segs; i++) {
/* Final segment takes all leftover data; if that exceeds seg_size
 * (because the segment count was capped), use the large pool. */
686 if (i == (num_remaining_segs - 1)) {
688 if (remaining_data > seg_size)
689 pool = large_mbuf_pool;
691 pool = small_mbuf_pool;
692 data_size = remaining_data;
694 data_size = seg_size;
695 pool = small_mbuf_pool;
698 next_seg = rte_pktmbuf_alloc(pool);
699 if (next_seg == NULL) {
701 "New segment could not be allocated "
702 "from the mempool\n");
705 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
706 if (buf_ptr == NULL) {
708 "Not enough space in the buffer\n");
709 rte_pktmbuf_free(next_seg);
712 if (data_ptr != NULL) {
713 /* Copy characters without NULL terminator */
714 strncpy(buf_ptr, data_ptr, data_size);
715 data_ptr += data_size;
717 remaining_data -= data_size;
719 ret = rte_pktmbuf_chain(head_buf, next_seg);
721 rte_pktmbuf_free(next_seg);
723 "Segment could not chained\n");
/* Free callback for rte_pktmbuf_attach_extbuf(): intentionally a no-op —
 * the external buffers are memzones whose lifetime the test controls. */
732 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
/*
 * Enqueue num_bufs ops on device 0 / queue pair 0 and dequeue them all
 * into ops_processed, retrying the dequeue up to MAX_DEQD_RETRIES times
 * with a DEQUEUE_WAIT_TIME (us) pause between attempts. Note: dequeue
 * order may differ from enqueue order.
 */
737 test_run_enqueue_dequeue(struct rte_comp_op **ops, unsigned int num_bufs,
738 struct rte_comp_op **ops_processed)
740 uint16_t num_enqd, num_deqd, num_total_deqd;
741 unsigned int deqd_retries = 0;
743 /* Enqueue and dequeue all operations */
744 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
745 if (num_enqd < num_bufs) {
747 "Some operations could not be enqueued\n");
754 * If retrying a dequeue call, wait for 10 ms to allow
755 * enough time to the driver to process the operations
757 if (deqd_retries != 0) {
759 * Avoid infinite loop if not all the
760 * operations get out of the device
762 if (deqd_retries == MAX_DEQD_RETRIES) {
764 "Not all operations could be dequeued\n");
767 usleep(DEQUEUE_WAIT_TIME);
/* Accumulate until every enqueued op has been dequeued. */
769 num_deqd = rte_compressdev_dequeue_burst(0, 0,
770 &ops_processed[num_total_deqd], num_bufs);
771 num_total_deqd += num_deqd;
774 } while (num_total_deqd < num_enqd);
780 * Compresses and decompresses buffer with compressdev API and Zlib API
783 test_deflate_comp_decomp(const struct interim_data_params *int_data,
784 const struct test_data_params *test_data)
786 struct comp_testsuite_params *ts_params = &testsuite_params;
787 const char * const *test_bufs = int_data->test_bufs;
788 unsigned int num_bufs = int_data->num_bufs;
789 uint16_t *buf_idx = int_data->buf_idx;
790 struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
791 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
792 unsigned int num_xforms = int_data->num_xforms;
793 enum rte_comp_op_type compress_state = test_data->compress_state;
794 enum rte_comp_op_type decompress_state = test_data->decompress_state;
795 unsigned int buff_type = test_data->buff_type;
796 unsigned int out_of_space = test_data->out_of_space;
797 unsigned int big_data = test_data->big_data;
798 enum zlib_direction zlib_dir = test_data->zlib_dir;
799 enum overflow_test overflow_tst = test_data->overflow;
800 int ret_status = TEST_FAILED;
801 struct rte_mbuf_ext_shared_info inbuf_info;
802 struct rte_mbuf_ext_shared_info compbuf_info;
803 struct rte_mbuf_ext_shared_info decompbuf_info;
805 struct rte_mbuf *uncomp_bufs[num_bufs];
806 struct rte_mbuf *comp_bufs[num_bufs];
807 struct rte_comp_op *ops[num_bufs];
808 struct rte_comp_op *ops_processed[num_bufs];
809 void *priv_xforms[num_bufs];
810 uint16_t num_enqd, num_deqd, num_total_deqd;
811 uint16_t num_priv_xforms = 0;
812 unsigned int deqd_retries = 0;
813 struct priv_op_data *priv_data;
816 struct rte_mempool *buf_pool;
818 /* Compressing with CompressDev */
819 unsigned int oos_zlib_decompress =
820 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
821 /* Decompressing with CompressDev */
822 unsigned int oos_zlib_compress =
823 (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
824 const struct rte_compressdev_capabilities *capa =
825 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
826 char *contig_buf = NULL;
827 uint64_t compress_checksum[num_bufs];
828 uint32_t compressed_data_size[num_bufs];
830 char *all_decomp_data = NULL;
831 unsigned int decomp_produced_data_size = 0;
832 unsigned int step = 0;
834 TEST_ASSERT(decompress_state == RTE_COMP_OP_STATELESS || num_bufs == 1,
835 "Number of stateful operations in a step should be 1");
839 "Compress device does not support DEFLATE\n");
843 /* Initialize all arrays to NULL */
844 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
845 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
846 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
847 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
848 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
849 memset(compressed_data_size, 0, sizeof(uint32_t) * num_bufs);
851 if (decompress_state == RTE_COMP_OP_STATEFUL) {
852 data_size = strlen(test_bufs[0]) + 1;
853 all_decomp_data = rte_malloc(NULL, data_size,
854 RTE_CACHE_LINE_SIZE);
858 buf_pool = ts_params->big_mbuf_pool;
859 else if (buff_type == SGL_BOTH)
860 buf_pool = ts_params->small_mbuf_pool;
862 buf_pool = ts_params->large_mbuf_pool;
864 /* Prepare the source mbufs with the data */
865 ret = rte_pktmbuf_alloc_bulk(buf_pool,
866 uncomp_bufs, num_bufs);
869 "Source mbufs could not be allocated "
870 "from the mempool\n");
874 if (test_data->use_external_mbufs) {
875 inbuf_info.free_cb = extbuf_free_callback;
876 inbuf_info.fcb_opaque = NULL;
877 rte_mbuf_ext_refcnt_set(&inbuf_info, 1);
878 for (i = 0; i < num_bufs; i++) {
879 rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
880 test_data->inbuf_memzone->addr,
881 test_data->inbuf_memzone->iova,
882 test_data->inbuf_data_size,
884 rte_pktmbuf_append(uncomp_bufs[i],
885 test_data->inbuf_data_size);
887 } else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
888 for (i = 0; i < num_bufs; i++) {
889 data_size = strlen(test_bufs[i]) + 1;
890 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
892 big_data ? buf_pool : ts_params->small_mbuf_pool,
893 big_data ? buf_pool : ts_params->large_mbuf_pool,
894 big_data ? 0 : MAX_SEGS,
895 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
899 for (i = 0; i < num_bufs; i++) {
900 data_size = strlen(test_bufs[i]) + 1;
901 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
902 if (buf_ptr == NULL) {
904 "Append extra bytes to the source mbuf failed\n");
907 strlcpy(buf_ptr, test_bufs[i], data_size);
911 /* Prepare the destination mbufs */
912 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
915 "Destination mbufs could not be allocated "
916 "from the mempool\n");
920 if (test_data->use_external_mbufs) {
921 compbuf_info.free_cb = extbuf_free_callback;
922 compbuf_info.fcb_opaque = NULL;
923 rte_mbuf_ext_refcnt_set(&compbuf_info, 1);
924 for (i = 0; i < num_bufs; i++) {
925 rte_pktmbuf_attach_extbuf(comp_bufs[i],
926 test_data->compbuf_memzone->addr,
927 test_data->compbuf_memzone->iova,
928 test_data->compbuf_memzone->len,
930 rte_pktmbuf_append(comp_bufs[i],
931 test_data->compbuf_memzone->len);
933 } else if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
934 for (i = 0; i < num_bufs; i++) {
935 if (out_of_space == 1 && oos_zlib_decompress)
936 data_size = OUT_OF_SPACE_BUF;
938 (data_size = strlen(test_bufs[i]) *
939 COMPRESS_BUF_SIZE_RATIO);
941 if (prepare_sgl_bufs(NULL, comp_bufs[i],
943 big_data ? buf_pool : ts_params->small_mbuf_pool,
944 big_data ? buf_pool : ts_params->large_mbuf_pool,
945 big_data ? 0 : MAX_SEGS,
946 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
952 for (i = 0; i < num_bufs; i++) {
953 if (out_of_space == 1 && oos_zlib_decompress)
954 data_size = OUT_OF_SPACE_BUF;
957 ((test_data->zlib_dir == ZLIB_DECOMPRESS ||
958 test_data->zlib_dir == ZLIB_NONE) &&
959 overflow_tst == OVERFLOW_ENABLED) ?
960 COMPRESS_BUF_SIZE_RATIO_OVERFLOW :
961 COMPRESS_BUF_SIZE_RATIO;
963 data_size = strlen(test_bufs[i]) * ratio;
965 buf_ptr = rte_pktmbuf_append(comp_bufs[i], data_size);
966 if (buf_ptr == NULL) {
968 "Append extra bytes to the destination mbuf failed\n");
974 /* Build the compression operations */
975 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
978 "Compress operations could not be allocated "
979 "from the mempool\n");
984 for (i = 0; i < num_bufs; i++) {
985 ops[i]->m_src = uncomp_bufs[i];
986 ops[i]->m_dst = comp_bufs[i];
987 ops[i]->src.offset = 0;
988 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
989 ops[i]->dst.offset = 0;
990 if (compress_state == RTE_COMP_OP_STATELESS)
991 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
994 "Stateful operations are not supported "
995 "in these tests yet\n");
998 ops[i]->input_chksum = 0;
1000 * Store original operation index in private data,
1001 * since ordering does not have to be maintained,
1002 * when dequeueing from compressdev, so a comparison
1003 * at the end of the test can be done.
1005 priv_data = (struct priv_op_data *) (ops[i] + 1);
1006 priv_data->orig_idx = i;
1009 /* Compress data (either with Zlib API or compressdev API */
1010 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
1011 for (i = 0; i < num_bufs; i++) {
1012 const struct rte_comp_xform *compress_xform =
1013 compress_xforms[i % num_xforms];
1014 ret = compress_zlib(ops[i], compress_xform,
1019 ops_processed[i] = ops[i];
1022 /* Create compress private xform data */
1023 for (i = 0; i < num_xforms; i++) {
1024 ret = rte_compressdev_private_xform_create(0,
1025 (const struct rte_comp_xform *)compress_xforms[i],
1029 "Compression private xform "
1030 "could not be created\n");
1036 if (capa->comp_feature_flags &
1037 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1038 /* Attach shareable private xform data to ops */
1039 for (i = 0; i < num_bufs; i++)
1040 ops[i]->private_xform = priv_xforms[i % num_xforms];
1042 /* Create rest of the private xforms for the other ops */
1043 for (i = num_xforms; i < num_bufs; i++) {
1044 ret = rte_compressdev_private_xform_create(0,
1045 compress_xforms[i % num_xforms],
1049 "Compression private xform "
1050 "could not be created\n");
1056 /* Attach non shareable private xform data to ops */
1057 for (i = 0; i < num_bufs; i++)
1058 ops[i]->private_xform = priv_xforms[i];
1062 ret = test_run_enqueue_dequeue(ops, num_bufs, ops_processed);
1065 "Enqueue/dequeue operation failed\n");
1069 for (i = 0; i < num_bufs; i++) {
1070 compressed_data_size[i] += ops_processed[i]->produced;
1072 if (ops_processed[i]->status ==
1073 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
1076 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1077 ops[i]->src.offset +=
1078 ops_processed[i]->consumed;
1079 ops[i]->src.length -=
1080 ops_processed[i]->consumed;
1081 ops[i]->dst.offset +=
1082 ops_processed[i]->produced;
1084 buf_ptr = rte_pktmbuf_append(
1086 ops_processed[i]->produced);
1088 if (buf_ptr == NULL) {
1090 "Data recovery: append extra bytes to the current mbuf failed\n");
1098 /* Free compress private xforms */
1099 for (i = 0; i < num_priv_xforms; i++) {
1100 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1101 priv_xforms[i] = NULL;
1103 num_priv_xforms = 0;
1106 for (i = 0; i < num_bufs; i++) {
1107 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1108 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1109 const struct rte_comp_compress_xform *compress_xform =
1110 &compress_xforms[xform_idx]->compress;
1111 enum rte_comp_huffman huffman_type =
1112 compress_xform->deflate.huffman;
1113 char engine[] = "zlib (directly, not PMD)";
1114 if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
1115 strlcpy(engine, "PMD", sizeof(engine));
1117 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
1118 " %u bytes (level = %d, huffman = %s)\n",
1119 buf_idx[priv_data->orig_idx], engine,
1120 ops_processed[i]->consumed, ops_processed[i]->produced,
1121 compress_xform->level,
1122 huffman_type_strings[huffman_type]);
1123 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
1124 ops_processed[i]->consumed == 0 ? 0 :
1125 (float)ops_processed[i]->produced /
1126 ops_processed[i]->consumed * 100);
1127 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
1128 compress_checksum[i] = ops_processed[i]->output_chksum;
1133 * Check operation status and free source mbufs (destination mbuf and
1134 * compress operation information is needed for the decompression stage)
1136 for (i = 0; i < num_bufs; i++) {
1137 if (out_of_space && oos_zlib_decompress) {
1138 if (ops_processed[i]->status !=
1139 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1140 ret_status = TEST_FAILED;
1142 "Operation without expected out of "
1143 "space status error\n");
1149 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1150 if (overflow_tst == OVERFLOW_ENABLED) {
1151 if (ops_processed[i]->status ==
1152 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1154 RTE_LOG(INFO, USER1,
1155 "Out-of-space-recoverable functionality"
1156 " is not supported on this device\n");
1161 "Some operations were not successful\n");
1164 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1165 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1166 uncomp_bufs[priv_data->orig_idx] = NULL;
1169 if (out_of_space && oos_zlib_decompress) {
1170 ret_status = TEST_SUCCESS;
1174 /* Allocate buffers for decompressed data */
1175 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1178 "Destination mbufs could not be allocated "
1179 "from the mempool\n");
1183 if (test_data->use_external_mbufs) {
1184 decompbuf_info.free_cb = extbuf_free_callback;
1185 decompbuf_info.fcb_opaque = NULL;
1186 rte_mbuf_ext_refcnt_set(&decompbuf_info, 1);
1187 for (i = 0; i < num_bufs; i++) {
1188 rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
1189 test_data->uncompbuf_memzone->addr,
1190 test_data->uncompbuf_memzone->iova,
1191 test_data->uncompbuf_memzone->len,
1193 rte_pktmbuf_append(uncomp_bufs[i],
1194 test_data->uncompbuf_memzone->len);
1196 } else if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1197 for (i = 0; i < num_bufs; i++) {
1198 priv_data = (struct priv_op_data *)
1199 (ops_processed[i] + 1);
1200 if (out_of_space == 1 && oos_zlib_compress)
1201 data_size = OUT_OF_SPACE_BUF;
1202 else if (test_data->decompress_output_block_size != 0)
1204 test_data->decompress_output_block_size;
1207 strlen(test_bufs[priv_data->orig_idx]) + 1;
1209 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1211 big_data ? buf_pool : ts_params->small_mbuf_pool,
1212 big_data ? buf_pool : ts_params->large_mbuf_pool,
1213 big_data ? 0 : MAX_SEGS,
1214 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1220 for (i = 0; i < num_bufs; i++) {
1221 priv_data = (struct priv_op_data *)
1222 (ops_processed[i] + 1);
1223 if (out_of_space == 1 && oos_zlib_compress)
1224 data_size = OUT_OF_SPACE_BUF;
1225 else if (test_data->decompress_output_block_size != 0)
1227 test_data->decompress_output_block_size;
1230 strlen(test_bufs[priv_data->orig_idx]) + 1;
1232 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
1233 if (buf_ptr == NULL) {
1235 "Append extra bytes to the decompressed mbuf failed\n");
1241 /* Build the decompression operations */
1242 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1245 "Decompress operations could not be allocated "
1246 "from the mempool\n");
1250 /* Source buffer is the compressed data from the previous operations */
1251 for (i = 0; i < num_bufs; i++) {
1252 ops[i]->m_src = ops_processed[i]->m_dst;
1253 ops[i]->m_dst = uncomp_bufs[i];
1254 ops[i]->src.offset = 0;
1256 * Set the length of the compressed data to the
1257 * number of bytes that were produced in the previous stage
1259 if (compressed_data_size[i])
1260 ops[i]->src.length = compressed_data_size[i];
1262 ops[i]->src.length = ops_processed[i]->produced;
1264 ops[i]->dst.offset = 0;
1265 if (decompress_state == RTE_COMP_OP_STATELESS) {
1266 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1267 ops[i]->op_type = RTE_COMP_OP_STATELESS;
1268 } else if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_NONE) {
1269 ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
1270 ops[i]->op_type = RTE_COMP_OP_STATEFUL;
1273 "Stateful operations are not supported "
1274 "in these tests yet\n");
1277 ops[i]->input_chksum = 0;
1279 * Copy private data from previous operations,
1280 * to keep the pointer to the original buffer
1282 memcpy(ops[i] + 1, ops_processed[i] + 1,
1283 sizeof(struct priv_op_data));
1287 * Free the previous compress operations,
1288 * as they are not needed anymore
1290 rte_comp_op_bulk_free(ops_processed, num_bufs);
1292 /* Decompress data (either with Zlib API or compressdev API */
1293 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1294 for (i = 0; i < num_bufs; i++) {
1295 priv_data = (struct priv_op_data *)(ops[i] + 1);
1296 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1297 const struct rte_comp_xform *decompress_xform =
1298 decompress_xforms[xform_idx];
1300 ret = decompress_zlib(ops[i], decompress_xform);
1304 ops_processed[i] = ops[i];
1307 if (decompress_state == RTE_COMP_OP_STATELESS) {
1308 /* Create decompress private xform data */
1309 for (i = 0; i < num_xforms; i++) {
1310 ret = rte_compressdev_private_xform_create(0,
1311 (const struct rte_comp_xform *)
1312 decompress_xforms[i],
1316 "Decompression private xform "
1317 "could not be created\n");
1323 if (capa->comp_feature_flags &
1324 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1325 /* Attach shareable private xform data to ops */
1326 for (i = 0; i < num_bufs; i++) {
1327 priv_data = (struct priv_op_data *)
1329 uint16_t xform_idx =
1330 priv_data->orig_idx % num_xforms;
1331 ops[i]->private_xform =
1332 priv_xforms[xform_idx];
1335 /* Create rest of the private xforms */
1336 /* for the other ops */
1337 for (i = num_xforms; i < num_bufs; i++) {
1339 rte_compressdev_private_xform_create(0,
1340 decompress_xforms[i % num_xforms],
1344 "Decompression private xform could not be created\n");
1350 /* Attach non shareable private xform data */
1352 for (i = 0; i < num_bufs; i++) {
1353 priv_data = (struct priv_op_data *)
1355 uint16_t xform_idx =
1356 priv_data->orig_idx;
1357 ops[i]->private_xform =
1358 priv_xforms[xform_idx];
1362 /* Create a stream object for stateful decompression */
1363 ret = rte_compressdev_stream_create(0,
1364 decompress_xforms[0], &stream);
1367 "Decompression stream could not be created, error %d\n",
1371 /* Attach stream to ops */
1372 for (i = 0; i < num_bufs; i++)
1373 ops[i]->stream = stream;
1377 /* Enqueue and dequeue all operations */
1378 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1379 if (num_enqd < num_bufs) {
1381 "The operations could not be enqueued\n");
1388 * If retrying a dequeue call, wait for 10 ms to allow
1389 * enough time to the driver to process the operations
1391 if (deqd_retries != 0) {
1393 * Avoid infinite loop if not all the
1394 * operations get out of the device
1396 if (deqd_retries == MAX_DEQD_RETRIES) {
1398 "Not all operations could be "
1402 usleep(DEQUEUE_WAIT_TIME);
1404 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1405 &ops_processed[num_total_deqd], num_bufs);
1406 num_total_deqd += num_deqd;
1408 } while (num_total_deqd < num_enqd);
1413 for (i = 0; i < num_bufs; i++) {
1414 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1415 char engine[] = "zlib, (directly, no PMD)";
1416 if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1417 strlcpy(engine, "pmd", sizeof(engine));
1418 RTE_LOG(DEBUG, USER1,
1419 "Buffer %u decompressed by %s from %u to %u bytes\n",
1420 buf_idx[priv_data->orig_idx], engine,
1421 ops_processed[i]->consumed, ops_processed[i]->produced);
1426 * Check operation status and free source mbuf (destination mbuf and
1427 * compress operation information is still needed)
1429 for (i = 0; i < num_bufs; i++) {
1430 if (out_of_space && oos_zlib_compress) {
1431 if (ops_processed[i]->status !=
1432 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1433 ret_status = TEST_FAILED;
1435 "Operation without expected out of "
1436 "space status error\n");
1442 if (decompress_state == RTE_COMP_OP_STATEFUL
1443 && (ops_processed[i]->status ==
1444 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
1445 || ops_processed[i]->status ==
1446 RTE_COMP_OP_STATUS_SUCCESS)) {
1447 /* collect the output into all_decomp_data */
1448 const void *ptr = rte_pktmbuf_read(
1449 ops_processed[i]->m_dst,
1450 ops_processed[i]->dst.offset,
1451 ops_processed[i]->produced,
1453 decomp_produced_data_size);
1454 if (ptr != all_decomp_data + decomp_produced_data_size)
1455 rte_memcpy(all_decomp_data +
1456 decomp_produced_data_size,
1457 ptr, ops_processed[i]->produced);
1458 decomp_produced_data_size += ops_processed[i]->produced;
1459 if (ops_processed[i]->src.length >
1460 ops_processed[i]->consumed) {
1461 if (ops_processed[i]->status ==
1462 RTE_COMP_OP_STATUS_SUCCESS) {
1465 "Operation finished too early\n");
1469 if (step >= test_data->decompress_steps_max) {
1472 "Operation exceeded maximum steps\n");
1475 ops[i] = ops_processed[i];
1477 RTE_COMP_OP_STATUS_NOT_PROCESSED;
1478 ops[i]->src.offset +=
1479 ops_processed[i]->consumed;
1480 ops[i]->src.length -=
1481 ops_processed[i]->consumed;
1484 /* Compare the original stream with the */
1485 /* decompressed stream (in size and the data) */
1486 priv_data = (struct priv_op_data *)
1487 (ops_processed[i] + 1);
1489 test_bufs[priv_data->orig_idx];
1490 const char *buf2 = all_decomp_data;
1492 if (compare_buffers(buf1, strlen(buf1) + 1,
1493 buf2, decomp_produced_data_size) < 0)
1495 /* Test checksums */
1496 if (compress_xforms[0]->compress.chksum
1497 != RTE_COMP_CHECKSUM_NONE) {
1498 if (ops_processed[i]->output_chksum
1499 != compress_checksum[i]) {
1501 "The checksums differ\n"
1502 "Compression Checksum: %" PRIu64 "\tDecompression "
1503 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1504 ops_processed[i]->output_chksum);
1509 } else if (ops_processed[i]->status !=
1510 RTE_COMP_OP_STATUS_SUCCESS) {
1512 "Some operations were not successful\n");
1515 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1516 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1517 comp_bufs[priv_data->orig_idx] = NULL;
1520 if ((out_of_space && oos_zlib_compress)
1521 || (decompress_state == RTE_COMP_OP_STATEFUL)) {
1522 ret_status = TEST_SUCCESS;
1527 * Compare the original stream with the decompressed stream
1528 * (in size and the data)
1530 for (i = 0; i < num_bufs; i++) {
1531 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1532 const char *buf1 = test_data->use_external_mbufs ?
1533 test_data->inbuf_memzone->addr :
1534 test_bufs[priv_data->orig_idx];
1536 data_size = test_data->use_external_mbufs ?
1537 test_data->inbuf_data_size :
1539 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1540 if (contig_buf == NULL) {
1541 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1546 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1547 ops_processed[i]->produced, contig_buf);
1548 if (compare_buffers(buf1, data_size,
1549 buf2, ops_processed[i]->produced) < 0)
1552 /* Test checksums */
1553 if (compress_xforms[0]->compress.chksum !=
1554 RTE_COMP_CHECKSUM_NONE) {
1555 if (ops_processed[i]->output_chksum !=
1556 compress_checksum[i]) {
1557 RTE_LOG(ERR, USER1, "The checksums differ\n"
1558 "Compression Checksum: %" PRIu64 "\tDecompression "
1559 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1560 ops_processed[i]->output_chksum);
1565 rte_free(contig_buf);
1569 ret_status = TEST_SUCCESS;
1572 /* Free resources */
1573 for (i = 0; i < num_bufs; i++) {
1574 rte_pktmbuf_free(uncomp_bufs[i]);
1575 rte_pktmbuf_free(comp_bufs[i]);
1576 rte_comp_op_free(ops[i]);
1577 rte_comp_op_free(ops_processed[i]);
1579 for (i = 0; i < num_priv_xforms; i++)
1580 if (priv_xforms[i] != NULL)
1581 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1583 rte_compressdev_stream_free(0, stream);
1584 if (all_decomp_data != NULL)
1585 rte_free(all_decomp_data);
1586 rte_free(contig_buf);
/*
 * Verify stateless DEFLATE with fixed Huffman coding.
 * For every canned test buffer: compress with compressdev and decompress with
 * Zlib, then the reverse direction.  Skipped when the device does not report
 * RTE_COMP_FF_HUFFMAN_FIXED.
 * NOTE(review): several source lines are elided in this excerpt (error paths,
 * braces, some initializer fields); comments only were added here.
 */
1592 test_compressdev_deflate_stateless_fixed(void)
1594 struct comp_testsuite_params *ts_params = &testsuite_params;
1597 const struct rte_compressdev_capabilities *capab;
/* Query device 0 for DEFLATE capabilities; bail out if unavailable */
1599 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1600 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* Fixed Huffman support is a precondition for this test */
1602 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1605 struct rte_comp_xform *compress_xform =
1606 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1608 if (compress_xform == NULL) {
1610 "Compress xform could not be created\n");
/* Clone the default compress xform, then force fixed Huffman coding */
1615 memcpy(compress_xform, ts_params->def_comp_xform,
1616 sizeof(struct rte_comp_xform));
1617 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
/* One buffer per iteration; xforms shared across all iterations */
1619 struct interim_data_params int_data = {
1624 &ts_params->def_decomp_xform,
1628 struct test_data_params test_data = {
1629 .compress_state = RTE_COMP_OP_STATELESS,
1630 .decompress_state = RTE_COMP_OP_STATELESS,
1631 .buff_type = LB_BOTH,
1632 .zlib_dir = ZLIB_DECOMPRESS,
1635 .overflow = OVERFLOW_DISABLED
/* Exercise each test buffer in both directions (PMD<->zlib) */
1638 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1639 int_data.test_bufs = &compress_test_bufs[i];
1640 int_data.buf_idx = &i;
1642 /* Compress with compressdev, decompress with Zlib */
1643 test_data.zlib_dir = ZLIB_DECOMPRESS;
1644 ret = test_deflate_comp_decomp(&int_data, &test_data);
1648 /* Compress with Zlib, decompress with compressdev */
1649 test_data.zlib_dir = ZLIB_COMPRESS;
1650 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Cleanup path (elided labels presumably precede this free) */
1658 rte_free(compress_xform);
/*
 * Verify stateless DEFLATE with dynamic Huffman coding.
 * Mirrors the fixed-Huffman test: each buffer is compressed by compressdev
 * and decompressed by Zlib, then vice versa.  Skipped when the device does
 * not report RTE_COMP_FF_HUFFMAN_DYNAMIC.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
1663 test_compressdev_deflate_stateless_dynamic(void)
1665 struct comp_testsuite_params *ts_params = &testsuite_params;
/* Xform allocated before the capability check (freed on the skip path,
 * presumably — the elided lines would confirm) */
1668 struct rte_comp_xform *compress_xform =
1669 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1671 const struct rte_compressdev_capabilities *capab;
1673 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1674 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* Dynamic Huffman support is a precondition for this test */
1676 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1679 if (compress_xform == NULL) {
1681 "Compress xform could not be created\n");
/* Clone the default compress xform, then force dynamic Huffman coding */
1686 memcpy(compress_xform, ts_params->def_comp_xform,
1687 sizeof(struct rte_comp_xform));
1688 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1690 struct interim_data_params int_data = {
1695 &ts_params->def_decomp_xform,
1699 struct test_data_params test_data = {
1700 .compress_state = RTE_COMP_OP_STATELESS,
1701 .decompress_state = RTE_COMP_OP_STATELESS,
1702 .buff_type = LB_BOTH,
1703 .zlib_dir = ZLIB_DECOMPRESS,
1706 .overflow = OVERFLOW_DISABLED
/* Exercise each test buffer in both directions (PMD<->zlib) */
1709 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1710 int_data.test_bufs = &compress_test_bufs[i];
1711 int_data.buf_idx = &i;
1713 /* Compress with compressdev, decompress with Zlib */
1714 test_data.zlib_dir = ZLIB_DECOMPRESS;
1715 ret = test_deflate_comp_decomp(&int_data, &test_data);
1719 /* Compress with Zlib, decompress with compressdev */
1720 test_data.zlib_dir = ZLIB_COMPRESS;
1721 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Cleanup path */
1729 rte_free(compress_xform);
/*
 * Verify stateless DEFLATE over ALL test buffers in a single burst
 * (multi-operation enqueue), using the default xforms, in both
 * PMD-compress/zlib-decompress and zlib-compress/PMD-decompress directions.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
1734 test_compressdev_deflate_stateless_multi_op(void)
1736 struct comp_testsuite_params *ts_params = &testsuite_params;
/* All canned buffers submitted at once, tracked by index */
1737 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1738 uint16_t buf_idx[num_bufs];
/* Elided body presumably fills buf_idx[i] = i */
1742 for (i = 0; i < num_bufs; i++)
1745 struct interim_data_params int_data = {
1749 &ts_params->def_comp_xform,
1750 &ts_params->def_decomp_xform,
1754 struct test_data_params test_data = {
1755 .compress_state = RTE_COMP_OP_STATELESS,
1756 .decompress_state = RTE_COMP_OP_STATELESS,
1757 .buff_type = LB_BOTH,
1758 .zlib_dir = ZLIB_DECOMPRESS,
1761 .overflow = OVERFLOW_DISABLED
1764 /* Compress with compressdev, decompress with Zlib */
1765 test_data.zlib_dir = ZLIB_DECOMPRESS;
1766 ret = test_deflate_comp_decomp(&int_data, &test_data);
1770 /* Compress with Zlib, decompress with compressdev */
1771 test_data.zlib_dir = ZLIB_COMPRESS;
1772 ret = test_deflate_comp_decomp(&int_data, &test_data);
1776 return TEST_SUCCESS;
/*
 * Verify stateless DEFLATE across every supported compression level:
 * for each test buffer, sweep level from RTE_COMP_LEVEL_MIN to
 * RTE_COMP_LEVEL_MAX, compressing with compressdev and decompressing
 * with Zlib.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
1780 test_compressdev_deflate_stateless_multi_level(void)
1782 struct comp_testsuite_params *ts_params = &testsuite_params;
1786 struct rte_comp_xform *compress_xform =
1787 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1789 if (compress_xform == NULL) {
1791 "Compress xform could not be created\n");
/* Start from the default compress xform; level is varied per iteration */
1796 memcpy(compress_xform, ts_params->def_comp_xform,
1797 sizeof(struct rte_comp_xform));
1799 struct interim_data_params int_data = {
1804 &ts_params->def_decomp_xform,
1808 struct test_data_params test_data = {
1809 .compress_state = RTE_COMP_OP_STATELESS,
1810 .decompress_state = RTE_COMP_OP_STATELESS,
1811 .buff_type = LB_BOTH,
1812 .zlib_dir = ZLIB_DECOMPRESS,
1815 .overflow = OVERFLOW_DISABLED
1818 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1819 int_data.test_bufs = &compress_test_bufs[i];
1820 int_data.buf_idx = &i;
/* Inner sweep over every compression level */
1822 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1824 compress_xform->compress.level = level;
1825 /* Compress with compressdev, decompress with Zlib */
1826 test_data.zlib_dir = ZLIB_DECOMPRESS;
1827 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Cleanup path */
1836 rte_free(compress_xform);
1840 #define NUM_XFORMS 3
/*
 * Verify stateless DEFLATE with multiple xforms in flight: NUM_XFORMS
 * compress/decompress xform pairs are created with varying levels, all
 * operating on the same input buffer, so each op is matched to a different
 * private xform by the common comp/decomp driver.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
1842 test_compressdev_deflate_stateless_multi_xform(void)
1844 struct comp_testsuite_params *ts_params = &testsuite_params;
1845 uint16_t num_bufs = NUM_XFORMS;
1846 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1847 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1848 const char *test_buffers[NUM_XFORMS];
1850 unsigned int level = RTE_COMP_LEVEL_MIN;
1851 uint16_t buf_idx[num_bufs];
1854 /* Create multiple xforms with various levels */
1855 for (i = 0; i < NUM_XFORMS; i++) {
1856 compress_xforms[i] = rte_malloc(NULL,
1857 sizeof(struct rte_comp_xform), 0);
1858 if (compress_xforms[i] == NULL) {
1860 "Compress xform could not be created\n");
/* Clone default compress xform, then give this one its own level
 * (level presumably incremented on an elided line) */
1865 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1866 sizeof(struct rte_comp_xform));
1867 compress_xforms[i]->compress.level = level;
1870 decompress_xforms[i] = rte_malloc(NULL,
1871 sizeof(struct rte_comp_xform), 0);
1872 if (decompress_xforms[i] == NULL) {
1874 "Decompress xform could not be created\n");
1879 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1880 sizeof(struct rte_comp_xform));
1883 for (i = 0; i < NUM_XFORMS; i++) {
1885 /* Use the same buffer in all sessions */
1886 test_buffers[i] = compress_test_bufs[0];
1889 struct interim_data_params int_data = {
1898 struct test_data_params test_data = {
1899 .compress_state = RTE_COMP_OP_STATELESS,
1900 .decompress_state = RTE_COMP_OP_STATELESS,
1901 .buff_type = LB_BOTH,
1902 .zlib_dir = ZLIB_DECOMPRESS,
1905 .overflow = OVERFLOW_DISABLED
1908 /* Compress with compressdev, decompress with Zlib */
1909 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Cleanup: free every xform pair regardless of outcome */
1916 for (i = 0; i < NUM_XFORMS; i++) {
1917 rte_free(compress_xforms[i]);
1918 rte_free(decompress_xforms[i]);
/*
 * Verify stateless DEFLATE with scatter-gather (chained-mbuf) buffers.
 * Requires RTE_COMP_FF_OOP_SGL_IN_SGL_OUT; additionally exercises
 * SGL_TO_LB and LB_TO_SGL layouts when the corresponding feature flags
 * are present.  Each layout is run in both PMD<->zlib directions.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
1925 test_compressdev_deflate_stateless_sgl(void)
1927 struct comp_testsuite_params *ts_params = &testsuite_params;
1930 const struct rte_compressdev_capabilities *capab;
1932 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1933 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* SGL-in/SGL-out is the baseline requirement for this test */
1935 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1938 struct interim_data_params int_data = {
1942 &ts_params->def_comp_xform,
1943 &ts_params->def_decomp_xform,
1947 struct test_data_params test_data = {
1948 .compress_state = RTE_COMP_OP_STATELESS,
1949 .decompress_state = RTE_COMP_OP_STATELESS,
1950 .buff_type = SGL_BOTH,
1951 .zlib_dir = ZLIB_DECOMPRESS,
1954 .overflow = OVERFLOW_DISABLED
/* SGL_BOTH layout, every buffer, both directions */
1957 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1958 int_data.test_bufs = &compress_test_bufs[i];
1959 int_data.buf_idx = &i;
1961 /* Compress with compressdev, decompress with Zlib */
1962 test_data.zlib_dir = ZLIB_DECOMPRESS;
1963 ret = test_deflate_comp_decomp(&int_data, &test_data);
1967 /* Compress with Zlib, decompress with compressdev */
1968 test_data.zlib_dir = ZLIB_COMPRESS;
1969 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Optional: chained input to linear output */
1973 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1974 /* Compress with compressdev, decompress with Zlib */
1975 test_data.zlib_dir = ZLIB_DECOMPRESS;
1976 test_data.buff_type = SGL_TO_LB;
1977 ret = test_deflate_comp_decomp(&int_data, &test_data);
1981 /* Compress with Zlib, decompress with compressdev */
1982 test_data.zlib_dir = ZLIB_COMPRESS;
1983 test_data.buff_type = SGL_TO_LB;
1984 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Optional: linear input to chained output */
1989 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1990 /* Compress with compressdev, decompress with Zlib */
1991 test_data.zlib_dir = ZLIB_DECOMPRESS;
1992 test_data.buff_type = LB_TO_SGL;
1993 ret = test_deflate_comp_decomp(&int_data, &test_data);
1997 /* Compress with Zlib, decompress with compressdev */
1998 test_data.zlib_dir = ZLIB_COMPRESS;
1999 test_data.buff_type = LB_TO_SGL;
2000 ret = test_deflate_comp_decomp(&int_data, &test_data);
2006 return TEST_SUCCESS;
/*
 * Verify stateless DEFLATE checksum generation.  For each checksum mode the
 * device supports (CRC32, Adler32, combined CRC32+Adler32), run the test
 * buffers and compare the PMD's checksum against Zlib's where Zlib supports
 * the mode (ZLIB_COMPRESS direction), or PMD-vs-PMD otherwise (ZLIB_NONE).
 * Skipped entirely when the device supports no checksum at all.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
2012 struct comp_testsuite_params *ts_params = &testsuite_params;
2015 const struct rte_compressdev_capabilities *capab;
2017 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2018 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2020 /* Check if driver supports any checksum */
2021 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
2022 (capab->comp_feature_flags &
2023 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
2024 (capab->comp_feature_flags &
2025 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
/* Private copies of the default xforms so chksum can be toggled per mode */
2028 struct rte_comp_xform *compress_xform =
2029 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2030 if (compress_xform == NULL) {
2031 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2035 memcpy(compress_xform, ts_params->def_comp_xform,
2036 sizeof(struct rte_comp_xform));
2038 struct rte_comp_xform *decompress_xform =
2039 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2040 if (decompress_xform == NULL) {
2041 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2042 rte_free(compress_xform);
2046 memcpy(decompress_xform, ts_params->def_decomp_xform,
2047 sizeof(struct rte_comp_xform));
2049 struct interim_data_params int_data = {
2058 struct test_data_params test_data = {
2059 .compress_state = RTE_COMP_OP_STATELESS,
2060 .decompress_state = RTE_COMP_OP_STATELESS,
2061 .buff_type = LB_BOTH,
2062 .zlib_dir = ZLIB_DECOMPRESS,
2065 .overflow = OVERFLOW_DISABLED
2068 /* Check if driver supports crc32 checksum and test */
2069 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
2070 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2071 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2073 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2074 /* Compress with compressdev, decompress with Zlib */
2075 int_data.test_bufs = &compress_test_bufs[i];
2076 int_data.buf_idx = &i;
2078 /* Generate zlib checksum and test against selected
2079 * drivers decompression checksum
2081 test_data.zlib_dir = ZLIB_COMPRESS;
2082 ret = test_deflate_comp_decomp(&int_data, &test_data);
2086 /* Generate compression and decompression
2087 * checksum of selected driver
2089 test_data.zlib_dir = ZLIB_NONE;
2090 ret = test_deflate_comp_decomp(&int_data, &test_data);
2096 /* Check if driver supports adler32 checksum and test */
2097 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
2098 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2099 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2101 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2102 int_data.test_bufs = &compress_test_bufs[i];
2103 int_data.buf_idx = &i;
2105 /* Generate zlib checksum and test against selected
2106 * drivers decompression checksum
2108 test_data.zlib_dir = ZLIB_COMPRESS;
2109 ret = test_deflate_comp_decomp(&int_data, &test_data);
2112 /* Generate compression and decompression
2113 * checksum of selected driver
2115 test_data.zlib_dir = ZLIB_NONE;
2116 ret = test_deflate_comp_decomp(&int_data, &test_data);
2122 /* Check if driver supports combined crc and adler checksum and test */
2123 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
2124 compress_xform->compress.chksum =
2125 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2126 decompress_xform->decompress.chksum =
2127 RTE_COMP_CHECKSUM_CRC32_ADLER32;
/* Combined checksum has no Zlib counterpart, so PMD-only (ZLIB_NONE) */
2129 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2130 int_data.test_bufs = &compress_test_bufs[i];
2131 int_data.buf_idx = &i;
2133 /* Generate compression and decompression
2134 * checksum of selected driver
2136 test_data.zlib_dir = ZLIB_NONE;
2137 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Cleanup path */
2146 rte_free(compress_xform);
2147 rte_free(decompress_xform);
/*
 * Negative test: run the comp/decomp driver with out_of_space=1 so the
 * destination buffer is deliberately too small, and expect the PMD to report
 * an out-of-space status (errors in the log are expected).  Repeated for
 * linear and, when supported, SGL buffer layouts, in both directions.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
2152 test_compressdev_out_of_space_buffer(void)
2154 struct comp_testsuite_params *ts_params = &testsuite_params;
2157 const struct rte_compressdev_capabilities *capab;
2159 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
2161 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2162 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2164 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2167 struct interim_data_params int_data = {
2168 &compress_test_bufs[0],
2171 &ts_params->def_comp_xform,
2172 &ts_params->def_decomp_xform,
2176 struct test_data_params test_data = {
2177 .compress_state = RTE_COMP_OP_STATELESS,
2178 .decompress_state = RTE_COMP_OP_STATELESS,
2179 .buff_type = LB_BOTH,
2180 .zlib_dir = ZLIB_DECOMPRESS,
2181 .out_of_space = 1, /* run out-of-space test */
2183 .overflow = OVERFLOW_DISABLED
2185 /* Compress with compressdev, decompress with Zlib */
2186 test_data.zlib_dir = ZLIB_DECOMPRESS;
2187 ret = test_deflate_comp_decomp(&int_data, &test_data);
2191 /* Compress with Zlib, decompress with compressdev */
2192 test_data.zlib_dir = ZLIB_COMPRESS;
2193 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Repeat with chained mbufs when the device can do SGL in/out */
2197 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2198 /* Compress with compressdev, decompress with Zlib */
2199 test_data.zlib_dir = ZLIB_DECOMPRESS;
2200 test_data.buff_type = SGL_BOTH;
2201 ret = test_deflate_comp_decomp(&int_data, &test_data);
2205 /* Compress with Zlib, decompress with compressdev */
2206 test_data.zlib_dir = ZLIB_COMPRESS;
2207 test_data.buff_type = SGL_BOTH;
2208 ret = test_deflate_comp_decomp(&int_data, &test_data);
/*
 * Stress stateless DEFLATE with dynamic Huffman over a large
 * (BIG_DATA_TEST_SIZE) pseudo-random buffer using SGL mbuf chains.
 * The buffer is filled with odd-valued bytes (| 1) so it contains no
 * embedded NUL except the final terminator, since the driver treats inputs
 * as C strings.  Requires both HUFFMAN_DYNAMIC and OOP_SGL_IN_SGL_OUT.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
2220 test_compressdev_deflate_stateless_dynamic_big(void)
2222 struct comp_testsuite_params *ts_params = &testsuite_params;
2226 const struct rte_compressdev_capabilities *capab;
2227 char *test_buffer = NULL;
2229 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2230 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2232 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2235 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2238 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
2239 if (test_buffer == NULL) {
2241 "Can't allocate buffer for big-data\n");
2245 struct interim_data_params int_data = {
2246 (const char * const *)&test_buffer,
2249 &ts_params->def_comp_xform,
2250 &ts_params->def_decomp_xform,
2254 struct test_data_params test_data = {
2255 .compress_state = RTE_COMP_OP_STATELESS,
2256 .decompress_state = RTE_COMP_OP_STATELESS,
2257 .buff_type = SGL_BOTH,
2258 .zlib_dir = ZLIB_DECOMPRESS,
2261 .overflow = OVERFLOW_DISABLED
/* Temporarily switch the shared default xform to dynamic Huffman;
 * restored to DEFAULT on the cleanup path below */
2264 ts_params->def_comp_xform->compress.deflate.huffman =
2265 RTE_COMP_HUFFMAN_DYNAMIC;
2267 /* fill the buffer with data based on rand. data */
2268 srand(BIG_DATA_TEST_SIZE);
2269 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2270 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2271 test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
2273 /* Compress with compressdev, decompress with Zlib */
2274 test_data.zlib_dir = ZLIB_DECOMPRESS;
2275 ret = test_deflate_comp_decomp(&int_data, &test_data);
2279 /* Compress with Zlib, decompress with compressdev */
2280 test_data.zlib_dir = ZLIB_COMPRESS;
2281 ret = test_deflate_comp_decomp(&int_data, &test_data);
/* Cleanup: restore shared xform and release the big buffer */
2288 ts_params->def_comp_xform->compress.deflate.huffman =
2289 RTE_COMP_HUFFMAN_DEFAULT;
2290 rte_free(test_buffer);
/*
 * Verify STATEFUL decompression: data is compressed with Zlib, then
 * decompressed by the PMD in multiple steps (output block size 2000 bytes,
 * at most 4 steps), first with linear buffers and then with SGL when
 * supported.  Skipped unless RTE_COMP_FF_STATEFUL_DECOMPRESSION is set.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
2295 test_compressdev_deflate_stateful_decomp(void)
2297 struct comp_testsuite_params *ts_params = &testsuite_params;
2300 const struct rte_compressdev_capabilities *capab;
2302 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2303 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2305 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2308 struct interim_data_params int_data = {
2309 &compress_test_bufs[0],
2312 &ts_params->def_comp_xform,
2313 &ts_params->def_decomp_xform,
2317 struct test_data_params test_data = {
2318 .compress_state = RTE_COMP_OP_STATELESS,
2319 .decompress_state = RTE_COMP_OP_STATEFUL,
2320 .buff_type = LB_BOTH,
2321 .zlib_dir = ZLIB_COMPRESS,
/* Stateful decompress consumes output in fixed-size blocks */
2324 .decompress_output_block_size = 2000,
2325 .decompress_steps_max = 4,
2326 .overflow = OVERFLOW_DISABLED
2329 /* Compress with Zlib, decompress with compressdev */
2330 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2335 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2336 /* Now test with SGL buffers */
2337 test_data.buff_type = SGL_BOTH;
2338 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
/*
 * Verify STATEFUL decompression combined with checksum generation.
 * For each checksum mode the device supports (CRC32, Adler32, combined),
 * compress with Zlib (or PMD-only for the combined mode, which Zlib lacks)
 * and decompress statefully with the PMD, for LB and (when supported) SGL
 * buffer layouts.  Skipped unless the device supports stateful
 * decompression and at least one checksum.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
2351 test_compressdev_deflate_stateful_decomp_checksum(void)
2353 struct comp_testsuite_params *ts_params = &testsuite_params;
2356 const struct rte_compressdev_capabilities *capab;
2358 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2359 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2361 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2364 /* Check if driver supports any checksum */
2365 if (!(capab->comp_feature_flags &
2366 (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
2367 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
/* Private copies of the default xforms so chksum can be toggled per mode */
2370 struct rte_comp_xform *compress_xform =
2371 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2372 if (compress_xform == NULL) {
2373 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2377 memcpy(compress_xform, ts_params->def_comp_xform,
2378 sizeof(struct rte_comp_xform));
2380 struct rte_comp_xform *decompress_xform =
2381 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2382 if (decompress_xform == NULL) {
2383 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2384 rte_free(compress_xform);
2388 memcpy(decompress_xform, ts_params->def_decomp_xform,
2389 sizeof(struct rte_comp_xform));
2391 struct interim_data_params int_data = {
2392 &compress_test_bufs[0],
2400 struct test_data_params test_data = {
2401 .compress_state = RTE_COMP_OP_STATELESS,
2402 .decompress_state = RTE_COMP_OP_STATEFUL,
2403 .buff_type = LB_BOTH,
2404 .zlib_dir = ZLIB_COMPRESS,
/* Stateful decompress consumes output in fixed-size blocks */
2407 .decompress_output_block_size = 2000,
2408 .decompress_steps_max = 4,
2409 .overflow = OVERFLOW_DISABLED
2412 /* Check if driver supports crc32 checksum and test */
2413 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
2414 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2415 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2416 /* Compress with Zlib, decompress with compressdev */
2417 test_data.buff_type = LB_BOTH;
2418 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2422 if (capab->comp_feature_flags &
2423 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2424 /* Now test with SGL buffers */
2425 test_data.buff_type = SGL_BOTH;
2426 if (test_deflate_comp_decomp(&int_data,
2434 /* Check if driver supports adler32 checksum and test */
2435 if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
2436 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2437 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2438 /* Compress with Zlib, decompress with compressdev */
2439 test_data.buff_type = LB_BOTH;
2440 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2444 if (capab->comp_feature_flags &
2445 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2446 /* Now test with SGL buffers */
2447 test_data.buff_type = SGL_BOTH;
2448 if (test_deflate_comp_decomp(&int_data,
2456 /* Check if driver supports combined crc and adler checksum and test */
2457 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
2458 compress_xform->compress.chksum =
2459 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2460 decompress_xform->decompress.chksum =
2461 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2462 /* Zlib doesn't support combined checksum */
2463 test_data.zlib_dir = ZLIB_NONE;
2464 /* Compress stateless, decompress stateful with compressdev */
2465 test_data.buff_type = LB_BOTH;
2466 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2470 if (capab->comp_feature_flags &
2471 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2472 /* Now test with SGL buffers */
2473 test_data.buff_type = SGL_BOTH;
2474 if (test_deflate_comp_decomp(&int_data,
/* Cleanup path */
2485 rte_free(compress_xform);
2486 rte_free(decompress_xform);
/*
 * Look up (or create) a per-socket, IOVA-contiguous memzone named
 * "<name>_<socket_id>" of exactly `size` bytes, used to back external mbuf
 * data areas in the external-mbuf test.  A stale memzone of a different
 * size is freed and re-reserved (the elided line after the free presumably
 * resets `memzone` to NULL so the reserve path runs — TODO confirm).
 * Returns the memzone, or NULL on allocation failure (an error is logged).
 */
2490 static const struct rte_memzone *
2491 make_memzone(const char *name, size_t size)
2493 unsigned int socket_id = rte_socket_id();
2494 char mz_name[RTE_MEMZONE_NAMESIZE];
2495 const struct rte_memzone *memzone;
/* Name is suffixed with the socket id so each socket gets its own zone */
2497 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id);
2498 memzone = rte_memzone_lookup(mz_name);
/* Existing zone of the wrong size cannot be reused — free it */
2499 if (memzone != NULL && memzone->len != size) {
2500 rte_memzone_free(memzone);
2503 if (memzone == NULL) {
2504 memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id,
2505 RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
2506 if (memzone == NULL)
2507 RTE_LOG(ERR, USER1, "Can't allocate memory zone %s",
/*
 * Verify comp/decomp with EXTERNAL mbuf data buffers: input, compressed and
 * decompressed data live in dedicated memzones attached to mbufs via
 * rte_pktmbuf_attach_extbuf (use_external_mbufs = 1).  The memzones are
 * sized for the largest test string (compressed zone scaled by
 * COMPRESS_BUF_SIZE_RATIO) and freed at the end.
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
2514 test_compressdev_external_mbufs(void)
2516 struct comp_testsuite_params *ts_params = &testsuite_params;
2517 size_t data_len = 0;
2519 int ret = TEST_FAILED;
/* Memzones must hold the longest test string (incl. NUL terminator) */
2521 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
2522 data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1);
2524 struct interim_data_params int_data = {
2528 &ts_params->def_comp_xform,
2529 &ts_params->def_decomp_xform,
2533 struct test_data_params test_data = {
2534 .compress_state = RTE_COMP_OP_STATELESS,
2535 .decompress_state = RTE_COMP_OP_STATELESS,
2536 .buff_type = LB_BOTH,
2537 .zlib_dir = ZLIB_DECOMPRESS,
2540 .use_external_mbufs = 1,
2541 .inbuf_data_size = data_len,
2542 .inbuf_memzone = make_memzone("inbuf", data_len),
2543 .compbuf_memzone = make_memzone("compbuf", data_len *
2544 COMPRESS_BUF_SIZE_RATIO),
2545 .uncompbuf_memzone = make_memzone("decompbuf", data_len),
2546 .overflow = OVERFLOW_DISABLED
2549 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2550 /* prepare input data */
2551 data_len = strlen(compress_test_bufs[i]) + 1;
/* Copy the current test string into the shared input memzone */
2552 rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i],
2554 test_data.inbuf_data_size = data_len;
2555 int_data.buf_idx = &i;
2557 /* Compress with compressdev, decompress with Zlib */
2558 test_data.zlib_dir = ZLIB_DECOMPRESS;
2559 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
2562 /* Compress with Zlib, decompress with compressdev */
2563 test_data.zlib_dir = ZLIB_COMPRESS;
2564 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
/* Cleanup: release the three external-buffer memzones */
2571 rte_memzone_free(test_data.inbuf_memzone);
2572 rte_memzone_free(test_data.compbuf_memzone);
2573 rte_memzone_free(test_data.uncompbuf_memzone);
/*
 * Verify stateless fixed-Huffman DEFLATE with overflow recovery enabled
 * (.overflow = OVERFLOW_ENABLED): the comp/decomp driver may report a
 * recoverable out-of-space condition.  A negative result fails the test;
 * a positive result (comp_result > 0) is treated as a non-fatal condition
 * (the elided branch bodies presumably record "unsupported" — TODO confirm).
 * NOTE(review): several source lines are elided in this excerpt; comments
 * only were added here.
 */
2578 test_compressdev_deflate_stateless_fixed_oos_recoverable(void)
2580 struct comp_testsuite_params *ts_params = &testsuite_params;
2584 const struct rte_compressdev_capabilities *capab;
2586 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2587 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2589 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2592 struct rte_comp_xform *compress_xform =
2593 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2595 if (compress_xform == NULL) {
2597 "Compress xform could not be created\n");
/* Clone the default compress xform, then force fixed Huffman coding */
2602 memcpy(compress_xform, ts_params->def_comp_xform,
2603 sizeof(struct rte_comp_xform));
2604 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
2606 struct interim_data_params int_data = {
2611 &ts_params->def_decomp_xform,
2615 struct test_data_params test_data = {
2616 .compress_state = RTE_COMP_OP_STATELESS,
2617 .decompress_state = RTE_COMP_OP_STATELESS,
2618 .buff_type = LB_BOTH,
2619 .zlib_dir = ZLIB_DECOMPRESS,
/* Key difference vs. the plain fixed test: overflow recovery is on */
2622 .overflow = OVERFLOW_ENABLED
2625 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2626 int_data.test_bufs = &compress_test_bufs[i];
2627 int_data.buf_idx = &i;
2629 /* Compress with compressdev, decompress with Zlib */
2630 test_data.zlib_dir = ZLIB_DECOMPRESS;
2631 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
2632 if (comp_result < 0) {
2635 } else if (comp_result > 0) {
2640 /* Compress with Zlib, decompress with compressdev */
2641 test_data.zlib_dir = ZLIB_COMPRESS;
2642 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
2643 if (comp_result < 0) {
2646 } else if (comp_result > 0) {
/* Cleanup path */
2655 rte_free(compress_xform);
/*
 * Test-suite descriptor consumed by unit_test_suite_runner(): suite-wide
 * setup/teardown plus the NULL-terminated case table. All deflate cases
 * share the generic per-test setup/teardown; the invalid-configuration
 * case intentionally runs with no per-test fixtures.
 * NOTE(review): the struct's closing braces fall in an elided region of
 * this chunk.
 */
2659 static struct unit_test_suite compressdev_testsuite = {
2660 .suite_name = "compressdev unit test suite",
2661 .setup = testsuite_setup,
2662 .teardown = testsuite_teardown,
2663 .unit_test_cases = {
2664 TEST_CASE_ST(NULL, NULL,
2665 test_compressdev_invalid_configuration),
2666 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2667 test_compressdev_deflate_stateless_fixed),
2668 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2669 test_compressdev_deflate_stateless_dynamic),
2670 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2671 test_compressdev_deflate_stateless_dynamic_big),
2672 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2673 test_compressdev_deflate_stateless_multi_op),
2674 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2675 test_compressdev_deflate_stateless_multi_level),
2676 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2677 test_compressdev_deflate_stateless_multi_xform),
2678 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2679 test_compressdev_deflate_stateless_sgl),
2680 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2681 test_compressdev_deflate_stateless_checksum),
2682 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2683 test_compressdev_out_of_space_buffer),
2684 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2685 test_compressdev_deflate_stateful_decomp),
2686 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2687 test_compressdev_deflate_stateful_decomp_checksum),
2688 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2689 test_compressdev_external_mbufs),
2690 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2691 test_compressdev_deflate_stateless_fixed_oos_recoverable),
2692 TEST_CASES_END() /**< NULL terminate unit test array */
/*
 * Entry point wired to the "compressdev_autotest" command: runs the whole
 * suite and propagates the runner's result. (Return-type line and braces
 * are elided from this chunk.)
 */
2697 test_compressdev(void)
2699 return unit_test_suite_runner(&compressdev_testsuite);
2702 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);