1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
12 #include <rte_compressdev.h>
14 #include "test_compressdev_test_buffer.h"
/* Integer ceiling division; correct for non-negative a and positive b. */
17 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
/* DEFLATE window size as base-2 log of the window in bytes (see zlib windowBits). */
19 #define DEFAULT_WINDOW_SIZE 15
/* zlib memory level handed to deflateInit2(). */
20 #define DEFAULT_MEM_LEVEL 8
/* Give up after this many empty dequeue retries (avoids infinite loops). */
21 #define MAX_DEQD_RETRIES 10
/* Microseconds slept between dequeue retries (used with usleep()). */
22 #define DEQUEUE_WAIT_TIME 10000
25 * 30% extra size for compressed data compared to original data,
26 * in case data size cannot be reduced and it is actually bigger
27 * due to the compress block headers
29 #define COMPRESS_BUF_SIZE_RATIO 1.3
/* Element count for the large-mbuf pool. */
30 #define NUM_LARGE_MBUFS 16
/* Per-segment data size used when building scatter-gather chains. */
31 #define SMALL_SEG_SIZE 256
/* Device configuration limits: private xforms and in-flight ops per qp. */
34 #define NUM_MAX_XFORMS 16
35 #define NUM_MAX_INFLIGHT_OPS 128
/*
 * Human-readable names for enum rte_comp_huffman, indexed by enum value;
 * used by the debug logging in test_deflate_comp_decomp().
 * NOTE(review): the declaration's opening line is not visible in this chunk.
 */
39 huffman_type_strings[] = {
40 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
41 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
42 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/*
 * Shared state for the whole test suite, created once in testsuite_setup()
 * and released in testsuite_teardown().
 */
56 struct comp_testsuite_params {
57 struct rte_mempool *large_mbuf_pool;
/* Pool of SMALL_SEG_SIZE mbufs used to build scatter-gather chains. */
58 struct rte_mempool *small_mbuf_pool;
59 struct rte_mempool *op_pool;
/* Default compress/decompress xforms copied by the individual tests. */
60 struct rte_comp_xform *def_comp_xform;
61 struct rte_comp_xform *def_decomp_xform;
/* Zero-initialized singleton; populated by testsuite_setup(). */
64 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite-level teardown: frees everything testsuite_setup() allocated.
 * rte_mempool_free() and rte_free() both accept NULL, so this is safe
 * after a partially failed setup as well.
 */
67 testsuite_teardown(void)
69 struct comp_testsuite_params *ts_params = &testsuite_params;
71 rte_mempool_free(ts_params->large_mbuf_pool);
72 rte_mempool_free(ts_params->small_mbuf_pool);
73 rte_mempool_free(ts_params->op_pool);
74 rte_free(ts_params->def_comp_xform);
75 rte_free(ts_params->def_decomp_xform);
/*
 * testsuite_setup() body: requires at least one compressdev, then creates
 * the large/small mbuf pools sized from the biggest test buffer, the
 * comp-op pool, and the default compress/decompress xforms.
 * On any failure it falls through to testsuite_teardown() (see below).
 */
81 struct comp_testsuite_params *ts_params = &testsuite_params;
82 uint32_t max_buf_size = 0;
85 if (rte_compressdev_count() == 0) {
86 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
90 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
91 rte_compressdev_name_get(0));
/* Find the largest test buffer (including its NUL terminator). */
93 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
94 max_buf_size = RTE_MAX(max_buf_size,
95 strlen(compress_test_bufs[i]) + 1);
98 * Buffers to be used in compression and decompression.
99 * Since decompressed data might be larger than
100 * compressed data (due to block header),
101 * buffers should be big enough for both cases.
/* Scale by COMPRESS_BUF_SIZE_RATIO so incompressible data still fits. */
103 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
104 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
107 max_buf_size + RTE_PKTMBUF_HEADROOM,
109 if (ts_params->large_mbuf_pool == NULL) {
110 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
114 /* Create mempool with smaller buffers for SGL testing */
115 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
116 NUM_LARGE_MBUFS * MAX_SEGS,
118 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
120 if (ts_params->small_mbuf_pool == NULL) {
121 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* Op pool carries a per-op private area for struct priv_op_data. */
125 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
126 0, sizeof(struct priv_op_data),
128 if (ts_params->op_pool == NULL) {
129 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
133 ts_params->def_comp_xform =
134 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
135 if (ts_params->def_comp_xform == NULL) {
137 "Default compress xform could not be created\n");
140 ts_params->def_decomp_xform =
141 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
142 if (ts_params->def_decomp_xform == NULL) {
144 "Default decompress xform could not be created\n");
148 /* Initializes default values for compress/decompress xforms */
149 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing ',' is the comma operator, not a ';' — harmless
 * here but should be a semicolon. */
150 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
151 ts_params->def_comp_xform->compress.deflate.huffman =
152 RTE_COMP_HUFFMAN_DEFAULT;
153 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
154 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
155 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
157 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same comma-operator typo as the compress side. */
158 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
159 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
160 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: undo whatever was allocated before the failure. */
165 testsuite_teardown();
/*
 * Per-test setup: configure device 0 with a single queue pair,
 * set up that queue pair, and start the device.
 */
171 generic_ut_setup(void)
173 /* Configure compressdev (one device, one queue pair) */
174 struct rte_compressdev_config config = {
175 .socket_id = rte_socket_id(),
177 .max_nb_priv_xforms = NUM_MAX_XFORMS,
181 if (rte_compressdev_configure(0, &config) < 0) {
182 RTE_LOG(ERR, USER1, "Device configuration failed\n");
186 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
187 rte_socket_id()) < 0) {
188 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
192 if (rte_compressdev_start(0) < 0) {
193 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0 (close failure is only logged). */
201 generic_ut_teardown(void)
203 rte_compressdev_stop(0);
204 if (rte_compressdev_close(0) < 0)
205 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: every rte_compressdev_configure()/queue_pair_setup() call
 * below uses an invalid configuration and is expected to FAIL; the test
 * asserts that the API rejects it.
 */
209 test_compressdev_invalid_configuration(void)
211 struct rte_compressdev_config invalid_config;
212 struct rte_compressdev_config valid_config = {
213 .socket_id = rte_socket_id(),
215 .max_nb_priv_xforms = NUM_MAX_XFORMS,
218 struct rte_compressdev_info dev_info;
220 /* Invalid configuration with 0 queue pairs */
221 memcpy(&invalid_config, &valid_config,
222 sizeof(struct rte_compressdev_config));
223 invalid_config.nb_queue_pairs = 0;
225 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
226 "Device configuration was successful "
227 "with no queue pairs (invalid)\n");
230 * Invalid configuration with too many queue pairs
231 * (if there is an actual maximum number of queue pairs)
233 rte_compressdev_info_get(0, &dev_info);
/* max_nb_queue_pairs == 0 means "no limit", so skip the check then. */
234 if (dev_info.max_nb_queue_pairs != 0) {
235 memcpy(&invalid_config, &valid_config,
236 sizeof(struct rte_compressdev_config));
237 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
239 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
240 "Device configuration was successful "
241 "with too many queue pairs (invalid)\n");
244 /* Invalid queue pair setup, with no number of queue pairs set */
245 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
246 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
247 "Queue pair setup was successful "
248 "with no queue pairs set (invalid)\n");
/*
 * Compare two buffers by length and content; logs and returns an error
 * (in the non-visible lines) on any mismatch.
 */
254 compare_buffers(const char *buffer1, uint32_t buffer1_len,
255 const char *buffer2, uint32_t buffer2_len)
257 if (buffer1_len != buffer2_len) {
258 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
262 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
263 RTE_LOG(ERR, USER1, "Buffers are different\n");
271 * Maps compressdev and Zlib flush flags
/* Translate enum rte_comp_flush_flag to the matching zlib flush constant. */
274 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
277 case RTE_COMP_FLUSH_NONE:
279 case RTE_COMP_FLUSH_SYNC:
281 case RTE_COMP_FLUSH_FULL:
283 case RTE_COMP_FLUSH_FINAL:
286 * There should be only the values above,
287 * so this should never happen
/*
 * Reference-compress op->m_src into op->m_dst using zlib's deflate, so the
 * result can be cross-checked against the compressdev PMD. Raw DEFLATE is
 * requested via negative windowBits. Multi-segment (SGL) mbufs are first
 * linearized into temporary contiguous buffers because zlib needs flat
 * input/output. On success fills op->consumed/produced/status.
 */
295 compress_zlib(struct rte_comp_op *op,
296 const struct rte_comp_xform *xform, int mem_level)
300 int strategy, window_bits, comp_level;
301 int ret = TEST_FAILED;
302 uint8_t *single_src_buf = NULL;
303 uint8_t *single_dst_buf = NULL;
305 /* initialize zlib stream */
306 stream.zalloc = Z_NULL;
307 stream.zfree = Z_NULL;
308 stream.opaque = Z_NULL;
/* Fixed Huffman maps to a non-default strategy (set in the hidden branch). */
310 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
313 strategy = Z_DEFAULT_STRATEGY;
316 * Window bits is the base two logarithm of the window size (in bytes).
317 * When doing raw DEFLATE, this number will be negative.
319 window_bits = -(xform->compress.window_size);
321 comp_level = xform->compress.level;
/* RTE_COMP_LEVEL_NONE means "store only" -> plain deflateInit with Z_NO_COMPRESSION. */
323 if (comp_level != RTE_COMP_LEVEL_NONE)
324 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
325 window_bits, mem_level, strategy);
327 ret = deflateInit(&stream, Z_NO_COMPRESSION);
330 printf("Zlib deflate could not be initialized\n");
334 /* Assuming stateless operation */
/* SGL source: flatten both src and dst into single heap buffers. */
336 if (op->m_src->nb_segs > 1) {
337 single_src_buf = rte_malloc(NULL,
338 rte_pktmbuf_pkt_len(op->m_src), 0);
339 if (single_src_buf == NULL) {
340 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
343 single_dst_buf = rte_malloc(NULL,
344 rte_pktmbuf_pkt_len(op->m_dst), 0);
345 if (single_dst_buf == NULL) {
346 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
349 if (rte_pktmbuf_read(op->m_src, 0,
350 rte_pktmbuf_pkt_len(op->m_src),
351 single_src_buf) == NULL) {
353 "Buffer could not be read entirely\n");
357 stream.avail_in = op->src.length;
358 stream.next_in = single_src_buf;
359 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
360 stream.next_out = single_dst_buf;
/* Contiguous mbufs: point zlib directly at the mbuf data. */
363 stream.avail_in = op->src.length;
364 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
365 stream.avail_out = op->m_dst->data_len;
366 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
368 /* Stateless operation, all buffer will be compressed in one go */
369 zlib_flush = map_zlib_flush_flag(op->flush_flag);
370 ret = deflate(&stream, zlib_flush);
372 if (stream.avail_in != 0) {
373 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* A single-shot FINAL flush must end the stream. */
377 if (ret != Z_STREAM_END)
380 /* Copy data to destination SGL */
381 if (op->m_src->nb_segs > 1) {
382 uint32_t remaining_data = stream.total_out;
383 uint8_t *src_data = single_dst_buf;
384 struct rte_mbuf *dst_buf = op->m_dst;
/* Scatter the flat output back across the destination segments. */
386 while (remaining_data > 0) {
387 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
390 if (remaining_data < dst_buf->data_len) {
391 memcpy(dst_data, src_data, remaining_data);
394 memcpy(dst_data, src_data, dst_buf->data_len);
395 remaining_data -= dst_buf->data_len;
396 src_data += dst_buf->data_len;
397 dst_buf = dst_buf->next;
/* Report results in the op exactly as a PMD would. */
402 op->consumed = stream.total_in;
403 op->produced = stream.total_out;
404 op->status = RTE_COMP_OP_STATUS_SUCCESS;
406 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so these are safe on every path. */
411 rte_free(single_src_buf);
412 rte_free(single_dst_buf);
/*
 * Reference-decompress op->m_src into op->m_dst with zlib's inflate;
 * mirror image of compress_zlib(): raw DEFLATE via negative windowBits,
 * SGL mbufs linearized through temporary contiguous buffers, and the op's
 * consumed/produced/status fields filled on success.
 */
418 decompress_zlib(struct rte_comp_op *op,
419 const struct rte_comp_xform *xform)
424 int ret = TEST_FAILED;
425 uint8_t *single_src_buf = NULL;
426 uint8_t *single_dst_buf = NULL;
428 /* initialize zlib stream */
429 stream.zalloc = Z_NULL;
430 stream.zfree = Z_NULL;
431 stream.opaque = Z_NULL;
434 * Window bits is the base two logarithm of the window size (in bytes).
435 * When doing raw DEFLATE, this number will be negative.
437 window_bits = -(xform->decompress.window_size);
439 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" in the inflate path — misleading log. */
442 printf("Zlib deflate could not be initialized\n");
446 /* Assuming stateless operation */
/* SGL source: flatten src and dst into single heap buffers first. */
448 if (op->m_src->nb_segs > 1) {
449 single_src_buf = rte_malloc(NULL,
450 rte_pktmbuf_pkt_len(op->m_src), 0);
451 if (single_src_buf == NULL) {
452 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
455 single_dst_buf = rte_malloc(NULL,
456 rte_pktmbuf_pkt_len(op->m_dst), 0);
457 if (single_dst_buf == NULL) {
458 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
461 if (rte_pktmbuf_read(op->m_src, 0,
462 rte_pktmbuf_pkt_len(op->m_src),
463 single_src_buf) == NULL) {
465 "Buffer could not be read entirely\n");
469 stream.avail_in = op->src.length;
470 stream.next_in = single_src_buf;
471 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
472 stream.next_out = single_dst_buf;
/* Contiguous mbufs: inflate straight out of / into the mbuf data. */
475 stream.avail_in = op->src.length;
476 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
477 stream.avail_out = op->m_dst->data_len;
478 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
481 /* Stateless operation, all buffer will be compressed in one go */
482 zlib_flush = map_zlib_flush_flag(op->flush_flag);
483 ret = inflate(&stream, zlib_flush);
485 if (stream.avail_in != 0) {
486 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* Single-shot decompress must reach end of stream. */
490 if (ret != Z_STREAM_END)
/* Scatter the flat output back over the destination SGL segments. */
493 if (op->m_src->nb_segs > 1) {
494 uint32_t remaining_data = stream.total_out;
495 uint8_t *src_data = single_dst_buf;
496 struct rte_mbuf *dst_buf = op->m_dst;
498 while (remaining_data > 0) {
499 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
502 if (remaining_data < dst_buf->data_len) {
503 memcpy(dst_data, src_data, remaining_data);
506 memcpy(dst_data, src_data, dst_buf->data_len);
507 remaining_data -= dst_buf->data_len;
508 src_data += dst_buf->data_len;
509 dst_buf = dst_buf->next;
/* Report results in the op exactly as a PMD would. */
514 op->consumed = stream.total_in;
515 op->produced = stream.total_out;
516 op->status = RTE_COMP_OP_STATUS_SUCCESS;
518 inflateReset(&stream);
/*
 * Build a scatter-gather mbuf chain of total_data_size bytes on head_buf,
 * using SMALL_SEG_SIZE segments from small_mbuf_pool (last segment may come
 * from large_mbuf_pool if the remainder is bigger). If test_buf is non-NULL
 * its bytes are copied into the chain; with NULL only space is reserved.
 * limit_segs_in_sgl (when non-zero) caps the number of segments.
 */
528 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
529 uint32_t total_data_size,
530 struct rte_mempool *small_mbuf_pool,
531 struct rte_mempool *large_mbuf_pool,
532 uint8_t limit_segs_in_sgl)
534 uint32_t remaining_data = total_data_size;
535 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
536 struct rte_mempool *pool;
537 struct rte_mbuf *next_seg;
540 const char *data_ptr = test_buf;
/* Cap the chain length; "- 1" accounts for the head segment. */
544 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
545 num_remaining_segs = limit_segs_in_sgl - 1;
548 * Allocate data in the first segment (header) and
549 * copy data if test buffer is provided
551 if (remaining_data < SMALL_SEG_SIZE)
552 data_size = remaining_data;
554 data_size = SMALL_SEG_SIZE;
555 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
556 if (buf_ptr == NULL) {
558 "Not enough space in the 1st buffer\n");
562 if (data_ptr != NULL) {
563 /* Copy characters without NULL terminator */
/* strncpy is deliberate: exactly data_size bytes, no forced NUL. */
564 strncpy(buf_ptr, data_ptr, data_size);
565 data_ptr += data_size;
567 remaining_data -= data_size;
568 num_remaining_segs--;
571 * Allocate the rest of the segments,
572 * copy the rest of the data and chain the segments.
574 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment takes whatever remains; pick a pool that can hold it. */
576 if (i == (num_remaining_segs - 1)) {
578 if (remaining_data > SMALL_SEG_SIZE)
579 pool = large_mbuf_pool;
581 pool = small_mbuf_pool;
582 data_size = remaining_data;
584 data_size = SMALL_SEG_SIZE;
585 pool = small_mbuf_pool;
588 next_seg = rte_pktmbuf_alloc(pool);
589 if (next_seg == NULL) {
591 "New segment could not be allocated "
592 "from the mempool\n");
595 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
596 if (buf_ptr == NULL) {
598 "Not enough space in the buffer\n");
599 rte_pktmbuf_free(next_seg);
602 if (data_ptr != NULL) {
603 /* Copy characters without NULL terminator */
604 strncpy(buf_ptr, data_ptr, data_size);
605 data_ptr += data_size;
607 remaining_data -= data_size;
/* Link the new segment onto the chain; free it if chaining fails. */
609 ret = rte_pktmbuf_chain(head_buf, next_seg);
611 rte_pktmbuf_free(next_seg);
/* NOTE(review): log text should read "could not be chained". */
613 "Segment could not chained\n");
622 * Compresses and decompresses buffer with compressdev API and Zlib API
/*
 * Core round-trip helper used by every deflate test case:
 *  1) build source mbufs (flat or SGL) holding the test strings,
 *  2) compress them with either the compressdev PMD or zlib (zlib_dir),
 *  3) decompress the results with the opposite engine (or zlib for both),
 *  4) compare the decompressed output byte-for-byte with the originals.
 * Op order after dequeue is NOT guaranteed, so each op carries its original
 * index in priv_op_data to pair results with inputs. Returns a test status;
 * the shared cleanup path frees every mbuf/op/private xform still owned.
 */
625 test_deflate_comp_decomp(const char * const test_bufs[],
626 unsigned int num_bufs,
628 struct rte_comp_xform *compress_xforms[],
629 struct rte_comp_xform *decompress_xforms[],
630 unsigned int num_xforms,
631 enum rte_comp_op_type state,
633 enum zlib_direction zlib_dir)
635 struct comp_testsuite_params *ts_params = &testsuite_params;
638 struct rte_mbuf *uncomp_bufs[num_bufs];
639 struct rte_mbuf *comp_bufs[num_bufs];
640 struct rte_comp_op *ops[num_bufs];
641 struct rte_comp_op *ops_processed[num_bufs];
642 void *priv_xforms[num_bufs];
643 uint16_t num_enqd, num_deqd, num_total_deqd;
644 uint16_t num_priv_xforms = 0;
645 unsigned int deqd_retries = 0;
646 struct priv_op_data *priv_data;
649 struct rte_mempool *buf_pool;
651 const struct rte_compressdev_capabilities *capa =
652 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
653 char *contig_buf = NULL;
655 /* Initialize all arrays to NULL */
656 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
657 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
658 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
659 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
660 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* SGL tests pull head mbufs from the small pool; flat tests from the large one. */
663 buf_pool = ts_params->small_mbuf_pool;
665 buf_pool = ts_params->large_mbuf_pool;
667 /* Prepare the source mbufs with the data */
668 ret = rte_pktmbuf_alloc_bulk(buf_pool,
669 uncomp_bufs, num_bufs);
672 "Source mbufs could not be allocated "
673 "from the mempool\n");
/* SGL path: chain segments and copy the test string into them. */
678 for (i = 0; i < num_bufs; i++) {
679 data_size = strlen(test_bufs[i]) + 1;
680 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
682 ts_params->small_mbuf_pool,
683 ts_params->large_mbuf_pool,
/* Flat path: append once and snprintf the string (incl. NUL) in place. */
688 for (i = 0; i < num_bufs; i++) {
689 data_size = strlen(test_bufs[i]) + 1;
690 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
691 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
695 /* Prepare the destination mbufs */
696 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
699 "Destination mbufs could not be allocated "
700 "from the mempool\n");
/* Destination sized with 30% slack for incompressible inputs. */
705 for (i = 0; i < num_bufs; i++) {
706 data_size = strlen(test_bufs[i]) *
707 COMPRESS_BUF_SIZE_RATIO;
708 if (prepare_sgl_bufs(NULL, comp_bufs[i],
710 ts_params->small_mbuf_pool,
711 ts_params->large_mbuf_pool,
717 for (i = 0; i < num_bufs; i++) {
718 data_size = strlen(test_bufs[i]) *
719 COMPRESS_BUF_SIZE_RATIO;
720 rte_pktmbuf_append(comp_bufs[i], data_size);
724 /* Build the compression operations */
725 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
728 "Compress operations could not be allocated "
729 "from the mempool\n");
733 for (i = 0; i < num_bufs; i++) {
734 ops[i]->m_src = uncomp_bufs[i];
735 ops[i]->m_dst = comp_bufs[i];
736 ops[i]->src.offset = 0;
737 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
738 ops[i]->dst.offset = 0;
739 if (state == RTE_COMP_OP_STATELESS) {
740 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
743 "Stateful operations are not supported "
744 "in these tests yet\n");
747 ops[i]->input_chksum = 0;
749 * Store original operation index in private data,
750 * since ordering does not have to be maintained,
751 * when dequeueing from compressdev, so a comparison
752 * at the end of the test can be done.
/* priv_op_data lives immediately after the op (op pool private area). */
754 priv_data = (struct priv_op_data *) (ops[i] + 1);
755 priv_data->orig_idx = i;
758 /* Compress data (either with Zlib API or compressdev API */
759 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
760 for (i = 0; i < num_bufs; i++) {
761 const struct rte_comp_xform *compress_xform =
762 compress_xforms[i % num_xforms];
763 ret = compress_zlib(ops[i], compress_xform,
/* zlib runs synchronously, so ops are "processed" in place. */
768 ops_processed[i] = ops[i];
771 /* Create compress private xform data */
772 for (i = 0; i < num_xforms; i++) {
773 ret = rte_compressdev_private_xform_create(0,
774 (const struct rte_comp_xform *)compress_xforms[i],
778 "Compression private xform "
779 "could not be created\n");
/* Shareable xforms can be attached to many ops; otherwise one per op. */
785 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
786 /* Attach shareable private xform data to ops */
787 for (i = 0; i < num_bufs; i++)
788 ops[i]->private_xform = priv_xforms[i % num_xforms];
790 /* Create rest of the private xforms for the other ops */
791 for (i = num_xforms; i < num_bufs; i++) {
792 ret = rte_compressdev_private_xform_create(0,
793 compress_xforms[i % num_xforms],
797 "Compression private xform "
798 "could not be created\n");
804 /* Attach non shareable private xform data to ops */
805 for (i = 0; i < num_bufs; i++)
806 ops[i]->private_xform = priv_xforms[i];
809 /* Enqueue and dequeue all operations */
810 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
811 if (num_enqd < num_bufs) {
813 "The operations could not be enqueued\n");
820 * If retrying a dequeue call, wait for 10 ms to allow
821 * enough time to the driver to process the operations
823 if (deqd_retries != 0) {
825 * Avoid infinite loop if not all the
826 * operations get out of the device
828 if (deqd_retries == MAX_DEQD_RETRIES) {
830 "Not all operations could be "
834 usleep(DEQUEUE_WAIT_TIME);
836 num_deqd = rte_compressdev_dequeue_burst(0, 0,
837 &ops_processed[num_total_deqd], num_bufs);
838 num_total_deqd += num_deqd;
840 } while (num_total_deqd < num_enqd);
844 /* Free compress private xforms */
845 for (i = 0; i < num_priv_xforms; i++) {
846 rte_compressdev_private_xform_free(0, priv_xforms[i]);
847 priv_xforms[i] = NULL;
/* Debug summary per buffer; orig_idx maps dequeued op -> input buffer. */
852 for (i = 0; i < num_bufs; i++) {
853 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
854 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
855 const struct rte_comp_compress_xform *compress_xform =
856 &compress_xforms[xform_idx]->compress;
857 enum rte_comp_huffman huffman_type =
858 compress_xform->deflate.huffman;
859 RTE_LOG(DEBUG, USER1, "Buffer %u compressed from %u to %u bytes "
860 "(level = %d, huffman = %s)\n",
861 buf_idx[priv_data->orig_idx],
862 ops_processed[i]->consumed, ops_processed[i]->produced,
863 compress_xform->level,
864 huffman_type_strings[huffman_type]);
865 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f",
866 (float)ops_processed[i]->produced /
867 ops_processed[i]->consumed * 100);
872 * Check operation status and free source mbufs (destination mbuf and
873 * compress operation information is needed for the decompression stage)
875 for (i = 0; i < num_bufs; i++) {
876 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
878 "Some operations were not successful\n");
881 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
882 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
883 uncomp_bufs[priv_data->orig_idx] = NULL;
886 /* Allocate buffers for decompressed data */
887 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
890 "Destination mbufs could not be allocated "
891 "from the mempool\n");
/* Decompressed output is sized from the original (pre-compression) string. */
896 for (i = 0; i < num_bufs; i++) {
897 priv_data = (struct priv_op_data *)
898 (ops_processed[i] + 1);
899 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
900 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
902 ts_params->small_mbuf_pool,
903 ts_params->large_mbuf_pool,
909 for (i = 0; i < num_bufs; i++) {
910 priv_data = (struct priv_op_data *)
911 (ops_processed[i] + 1);
912 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
913 rte_pktmbuf_append(uncomp_bufs[i], data_size);
917 /* Build the decompression operations */
918 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
921 "Decompress operations could not be allocated "
922 "from the mempool\n");
926 /* Source buffer is the compressed data from the previous operations */
927 for (i = 0; i < num_bufs; i++) {
928 ops[i]->m_src = ops_processed[i]->m_dst;
929 ops[i]->m_dst = uncomp_bufs[i];
930 ops[i]->src.offset = 0;
932 * Set the length of the compressed data to the
933 * number of bytes that were produced in the previous stage
935 ops[i]->src.length = ops_processed[i]->produced;
936 ops[i]->dst.offset = 0;
937 if (state == RTE_COMP_OP_STATELESS) {
938 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
941 "Stateful operations are not supported "
942 "in these tests yet\n");
945 ops[i]->input_chksum = 0;
947 * Copy private data from previous operations,
948 * to keep the pointer to the original buffer
950 memcpy(ops[i] + 1, ops_processed[i] + 1,
951 sizeof(struct priv_op_data));
955 * Free the previous compress operations,
956 * as it is not needed anymore
958 for (i = 0; i < num_bufs; i++) {
959 rte_comp_op_free(ops_processed[i]);
960 ops_processed[i] = NULL;
963 /* Decompress data (either with Zlib API or compressdev API */
964 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
965 for (i = 0; i < num_bufs; i++) {
966 priv_data = (struct priv_op_data *)(ops[i] + 1);
967 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
968 const struct rte_comp_xform *decompress_xform =
969 decompress_xforms[xform_idx];
971 ret = decompress_zlib(ops[i], decompress_xform);
975 ops_processed[i] = ops[i];
978 /* Create decompress private xform data */
979 for (i = 0; i < num_xforms; i++) {
980 ret = rte_compressdev_private_xform_create(0,
981 (const struct rte_comp_xform *)decompress_xforms[i],
985 "Decompression private xform "
986 "could not be created\n");
/* Same shareable/non-shareable split as the compression stage. */
992 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
993 /* Attach shareable private xform data to ops */
994 for (i = 0; i < num_bufs; i++) {
995 priv_data = (struct priv_op_data *)(ops[i] + 1);
996 uint16_t xform_idx = priv_data->orig_idx %
998 ops[i]->private_xform = priv_xforms[xform_idx];
1001 /* Create rest of the private xforms for the other ops */
1002 for (i = num_xforms; i < num_bufs; i++) {
1003 ret = rte_compressdev_private_xform_create(0,
1004 decompress_xforms[i % num_xforms],
1008 "Decompression private xform "
1009 "could not be created\n");
1015 /* Attach non shareable private xform data to ops */
1016 for (i = 0; i < num_bufs; i++) {
1017 priv_data = (struct priv_op_data *)(ops[i] + 1);
1018 uint16_t xform_idx = priv_data->orig_idx;
1019 ops[i]->private_xform = priv_xforms[xform_idx];
1023 /* Enqueue and dequeue all operations */
1024 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1025 if (num_enqd < num_bufs) {
1027 "The operations could not be enqueued\n");
1034 * If retrying a dequeue call, wait for 10 ms to allow
1035 * enough time to the driver to process the operations
1037 if (deqd_retries != 0) {
1039 * Avoid infinite loop if not all the
1040 * operations get out of the device
1042 if (deqd_retries == MAX_DEQD_RETRIES) {
1044 "Not all operations could be "
1048 usleep(DEQUEUE_WAIT_TIME);
1050 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1051 &ops_processed[num_total_deqd], num_bufs);
1052 num_total_deqd += num_deqd;
1054 } while (num_total_deqd < num_enqd);
1059 for (i = 0; i < num_bufs; i++) {
1060 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1061 RTE_LOG(DEBUG, USER1, "Buffer %u decompressed from %u to %u bytes\n",
1062 buf_idx[priv_data->orig_idx],
1063 ops_processed[i]->consumed, ops_processed[i]->produced);
1068 * Check operation status and free source mbuf (destination mbuf and
1069 * compress operation information is still needed)
1071 for (i = 0; i < num_bufs; i++) {
1072 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1074 "Some operations were not successful\n");
1077 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1078 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1079 comp_bufs[priv_data->orig_idx] = NULL;
1083 * Compare the original stream with the decompressed stream
1084 * (in size and the data)
1086 for (i = 0; i < num_bufs; i++) {
1087 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1088 const char *buf1 = test_bufs[priv_data->orig_idx];
/* rte_pktmbuf_read() copies into contig_buf only when m_dst is segmented. */
1090 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1091 if (contig_buf == NULL) {
1092 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1097 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1098 ops_processed[i]->produced, contig_buf);
1100 if (compare_buffers(buf1, strlen(buf1) + 1,
1101 buf2, ops_processed[i]->produced) < 0)
1104 rte_free(contig_buf);
/* Common cleanup: frees whatever is still non-NULL on success or error. */
1111 /* Free resources */
1112 for (i = 0; i < num_bufs; i++) {
1113 rte_pktmbuf_free(uncomp_bufs[i]);
1114 rte_pktmbuf_free(comp_bufs[i]);
1115 rte_comp_op_free(ops[i]);
1116 rte_comp_op_free(ops_processed[i]);
1118 for (i = 0; i < num_priv_xforms; i++) {
1119 if (priv_xforms[i] != NULL)
1120 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1122 rte_free(contig_buf);
/*
 * Stateless round-trip with fixed Huffman coding, each test buffer tried
 * in both directions (PMD compress + zlib decompress, and vice versa).
 * Skipped when the device lacks RTE_COMP_FF_HUFFMAN_FIXED.
 */
1128 test_compressdev_deflate_stateless_fixed(void)
1130 struct comp_testsuite_params *ts_params = &testsuite_params;
1131 const char *test_buffer;
1134 const struct rte_compressdev_capabilities *capab;
1136 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1137 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1139 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1142 struct rte_comp_xform *compress_xform =
1143 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1145 if (compress_xform == NULL) {
1147 "Compress xform could not be created\n");
/* Clone the default xform, then force fixed Huffman. */
1152 memcpy(compress_xform, ts_params->def_comp_xform,
1153 sizeof(struct rte_comp_xform));
1154 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1156 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1157 test_buffer = compress_test_bufs[i];
1159 /* Compress with compressdev, decompress with Zlib */
1160 if (test_deflate_comp_decomp(&test_buffer, 1,
1163 &ts_params->def_decomp_xform,
1165 RTE_COMP_OP_STATELESS,
1167 ZLIB_DECOMPRESS) < 0) {
1172 /* Compress with Zlib, decompress with compressdev */
1173 if (test_deflate_comp_decomp(&test_buffer, 1,
1176 &ts_params->def_decomp_xform,
1178 RTE_COMP_OP_STATELESS,
1180 ZLIB_COMPRESS) < 0) {
1189 rte_free(compress_xform);
/*
 * Same round-trip as the fixed-Huffman case but with dynamic Huffman;
 * skipped when the device lacks RTE_COMP_FF_HUFFMAN_DYNAMIC.
 */
1194 test_compressdev_deflate_stateless_dynamic(void)
1196 struct comp_testsuite_params *ts_params = &testsuite_params;
1197 const char *test_buffer;
1200 struct rte_comp_xform *compress_xform =
1201 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1203 const struct rte_compressdev_capabilities *capab;
1205 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1206 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
/* NOTE(review): a skip on this path leaks compress_xform (allocated above
 * before the capability check) — verify against the full source. */
1208 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1211 if (compress_xform == NULL) {
1213 "Compress xform could not be created\n");
/* Clone the default xform, then force dynamic Huffman. */
1218 memcpy(compress_xform, ts_params->def_comp_xform,
1219 sizeof(struct rte_comp_xform));
1220 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1222 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1223 test_buffer = compress_test_bufs[i];
1225 /* Compress with compressdev, decompress with Zlib */
1226 if (test_deflate_comp_decomp(&test_buffer, 1,
1229 &ts_params->def_decomp_xform,
1231 RTE_COMP_OP_STATELESS,
1233 ZLIB_DECOMPRESS) < 0) {
1238 /* Compress with Zlib, decompress with compressdev */
1239 if (test_deflate_comp_decomp(&test_buffer, 1,
1242 &ts_params->def_decomp_xform,
1244 RTE_COMP_OP_STATELESS,
1246 ZLIB_COMPRESS) < 0) {
1255 rte_free(compress_xform);
/*
 * Round-trip all test buffers in a single burst (one op per buffer) with
 * the default xforms, in both engine directions.
 */
1260 test_compressdev_deflate_stateless_multi_op(void)
1262 struct comp_testsuite_params *ts_params = &testsuite_params;
1263 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1264 uint16_t buf_idx[num_bufs];
1267 for (i = 0; i < num_bufs; i++)
1270 /* Compress with compressdev, decompress with Zlib */
1271 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1273 &ts_params->def_comp_xform,
1274 &ts_params->def_decomp_xform,
1276 RTE_COMP_OP_STATELESS,
1278 ZLIB_DECOMPRESS) < 0)
1281 /* Compress with Zlib, decompress with compressdev */
1282 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1284 &ts_params->def_comp_xform,
1285 &ts_params->def_decomp_xform,
1287 RTE_COMP_OP_STATELESS,
1292 return TEST_SUCCESS;
/*
 * Sweep every compression level from RTE_COMP_LEVEL_MIN to MAX for each
 * test buffer; compress with the PMD, decompress with zlib.
 */
1296 test_compressdev_deflate_stateless_multi_level(void)
1298 struct comp_testsuite_params *ts_params = &testsuite_params;
1299 const char *test_buffer;
1303 struct rte_comp_xform *compress_xform =
1304 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1306 if (compress_xform == NULL) {
1308 "Compress xform could not be created\n");
1313 memcpy(compress_xform, ts_params->def_comp_xform,
1314 sizeof(struct rte_comp_xform));
1316 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1317 test_buffer = compress_test_bufs[i];
1318 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1320 compress_xform->compress.level = level;
1321 /* Compress with compressdev, decompress with Zlib */
1322 if (test_deflate_comp_decomp(&test_buffer, 1,
1325 &ts_params->def_decomp_xform,
1327 RTE_COMP_OP_STATELESS,
1329 ZLIB_DECOMPRESS) < 0) {
1339 rte_free(compress_xform);
/* Number of distinct xforms exercised by the multi-xform test. */
1343 #define NUM_XFORMS 3
/*
 * Exercise several private xforms in one burst: NUM_XFORMS compress xforms
 * with differing levels, all applied to the same test buffer.
 */
1345 test_compressdev_deflate_stateless_multi_xform(void)
1347 struct comp_testsuite_params *ts_params = &testsuite_params;
1348 uint16_t num_bufs = NUM_XFORMS;
1349 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1350 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1351 const char *test_buffers[NUM_XFORMS];
1353 unsigned int level = RTE_COMP_LEVEL_MIN;
1354 uint16_t buf_idx[num_bufs];
1358 /* Create multiple xforms with various levels */
1359 for (i = 0; i < NUM_XFORMS; i++) {
1360 compress_xforms[i] = rte_malloc(NULL,
1361 sizeof(struct rte_comp_xform), 0);
1362 if (compress_xforms[i] == NULL) {
1364 "Compress xform could not be created\n");
1369 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1370 sizeof(struct rte_comp_xform));
1371 compress_xforms[i]->compress.level = level;
1374 decompress_xforms[i] = rte_malloc(NULL,
1375 sizeof(struct rte_comp_xform), 0);
1376 if (decompress_xforms[i] == NULL) {
1378 "Decompress xform could not be created\n");
1383 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1384 sizeof(struct rte_comp_xform));
1387 for (i = 0; i < NUM_XFORMS; i++) {
1389 /* Use the same buffer in all sessions */
1390 test_buffers[i] = compress_test_bufs[0];
1392 /* Compress with compressdev, decompress with Zlib */
1393 if (test_deflate_comp_decomp(test_buffers, num_bufs,
1398 RTE_COMP_OP_STATELESS,
1400 ZLIB_DECOMPRESS) < 0) {
/* Cleanup: rte_free(NULL) is a no-op, so partial allocation is safe. */
1407 for (i = 0; i < NUM_XFORMS; i++) {
1408 rte_free(compress_xforms[i]);
1409 rte_free(decompress_xforms[i]);
/*
 * Round-trip every test buffer using scatter-gather mbuf chains in both
 * engine directions; skipped when the device lacks
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT.
 */
1416 test_compressdev_deflate_stateless_sgl(void)
1418 struct comp_testsuite_params *ts_params = &testsuite_params;
1420 const char *test_buffer;
1421 const struct rte_compressdev_capabilities *capab;
1423 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1424 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1426 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1429 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1430 test_buffer = compress_test_bufs[i];
1431 /* Compress with compressdev, decompress with Zlib */
1432 if (test_deflate_comp_decomp(&test_buffer, 1,
1434 &ts_params->def_comp_xform,
1435 &ts_params->def_decomp_xform,
1437 RTE_COMP_OP_STATELESS,
1439 ZLIB_DECOMPRESS) < 0)
1442 /* Compress with Zlib, decompress with compressdev */
1443 if (test_deflate_comp_decomp(&test_buffer, 1,
1445 &ts_params->def_comp_xform,
1446 &ts_params->def_decomp_xform,
1448 RTE_COMP_OP_STATELESS,
1454 return TEST_SUCCESS;
/*
 * Test-suite table: suite-level setup/teardown wrap all cases; each case
 * (except the invalid-config one) gets the generic device setup/teardown.
 */
1457 static struct unit_test_suite compressdev_testsuite = {
1458 .suite_name = "compressdev unit test suite",
1459 .setup = testsuite_setup,
1460 .teardown = testsuite_teardown,
1461 .unit_test_cases = {
1462 TEST_CASE_ST(NULL, NULL,
1463 test_compressdev_invalid_configuration),
1464 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1465 test_compressdev_deflate_stateless_fixed),
1466 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1467 test_compressdev_deflate_stateless_dynamic),
1468 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1469 test_compressdev_deflate_stateless_multi_op),
1470 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1471 test_compressdev_deflate_stateless_multi_level),
1472 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1473 test_compressdev_deflate_stateless_multi_xform),
1474 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1475 test_compressdev_deflate_stateless_sgl),
1476 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point run by the DPDK test app; registered as "compressdev_autotest". */
1481 test_compressdev(void)
1483 return unit_test_suite_runner(&compressdev_testsuite);
1486 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);