1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
12 #include <rte_compressdev.h>
13 #include <rte_string_fns.h>
15 #include "test_compressdev_test_buffer.h"
/* Integer ceiling division: DIV_CEIL(a, b) == ceil(a/b) for non-negative a, positive b. */
18 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
/* Default DEFLATE window size (base-2 log of the window, in bytes) for the xforms. */
20 #define DEFAULT_WINDOW_SIZE 15
/* Default zlib memory level passed to deflateInit2(). */
21 #define DEFAULT_MEM_LEVEL 8
/* Max number of dequeue retries before declaring ops stuck in the device. */
22 #define MAX_DEQD_RETRIES 10
/* Delay between dequeue retries, in microseconds (used with usleep()). */
23 #define DEQUEUE_WAIT_TIME 10000
26 * 30% extra size for compressed data compared to original data,
27 * in case data size cannot be reduced and it is actually bigger
28 * due to the compress block headers
30 #define COMPRESS_BUF_SIZE_RATIO 1.3
/* Number of mbufs in the large pool; sized for one segment per test buffer. */
31 #define NUM_LARGE_MBUFS 16
/* Segment data size of the small pool, used to build chained (SGL) mbufs. */
32 #define SMALL_SEG_SIZE 256
/* Limits used when configuring the compressdev device and its queue pair. */
35 #define NUM_MAX_XFORMS 16
36 #define NUM_MAX_INFLIGHT_OPS 128
/*
 * Human-readable names for enum rte_comp_huffman values,
 * indexed by the enum constants; used in debug log output.
 */
40 huffman_type_strings[] = {
41 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
42 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
43 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/*
 * Resources shared by the whole test suite: mbuf pools for contiguous
 * ("large") and chained ("small" segment) buffers, the compress-operation
 * pool, and the default compress/decompress transforms.  Created in
 * testsuite_setup() and released in testsuite_teardown().
 */
57 struct comp_testsuite_params {
58 struct rte_mempool *large_mbuf_pool;
59 struct rte_mempool *small_mbuf_pool;
60 struct rte_mempool *op_pool;
61 struct rte_comp_xform *def_comp_xform;
62 struct rte_comp_xform *def_decomp_xform;
/* Single suite-wide instance; zero-initialized so teardown is safe before setup. */
65 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Suite teardown: warn about any mbufs/ops the tests failed to return to
 * their pools (helps catch per-test leaks), then free all suite resources.
 * rte_mempool_free() and rte_free() accept NULL, so this is safe even if
 * setup failed part-way through.
 */
68 testsuite_teardown(void)
70 struct comp_testsuite_params *ts_params = &testsuite_params;
72 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
73 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
74 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
75 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
76 if (rte_mempool_in_use_count(ts_params->op_pool))
77 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
79 rte_mempool_free(ts_params->large_mbuf_pool)
80 rte_mempool_free(ts_params->small_mbuf_pool);
81 rte_mempool_free(ts_params->op_pool);
82 rte_free(ts_params->def_comp_xform);
83 rte_free(ts_params->def_decomp_xform);
89 struct comp_testsuite_params *ts_params = &testsuite_params;
90 uint32_t max_buf_size = 0;
/* The suite needs at least one compressdev; all tests target device id 0. */
93 if (rte_compressdev_count() == 0) {
94 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
98 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
99 rte_compressdev_name_get(0));
/* Size buffers from the largest test string (+1 for the NUL terminator). */
101 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
102 max_buf_size = RTE_MAX(max_buf_size,
103 strlen(compress_test_bufs[i]) + 1);
106 * Buffers to be used in compression and decompression.
107 * Since decompressed data might be larger than
108 * compressed data (due to block header),
109 * buffers should be big enough for both cases.
111 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
112 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
115 max_buf_size + RTE_PKTMBUF_HEADROOM,
117 if (ts_params->large_mbuf_pool == NULL) {
118 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
122 /* Create mempool with smaller buffers for SGL testing */
123 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
124 NUM_LARGE_MBUFS * MAX_SEGS,
126 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
128 if (ts_params->small_mbuf_pool == NULL) {
129 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* Op pool carries struct priv_op_data per op (stores the original op index). */
133 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
134 0, sizeof(struct priv_op_data),
136 if (ts_params->op_pool == NULL) {
137 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
141 ts_params->def_comp_xform =
142 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
143 if (ts_params->def_comp_xform == NULL) {
145 "Default compress xform could not be created\n");
148 ts_params->def_decomp_xform =
149 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
150 if (ts_params->def_decomp_xform == NULL) {
152 "Default decompress xform could not be created\n");
156 /* Initializes default values for compress/decompress xforms */
157 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing comma below is the comma operator, not a statement
 * terminator — harmless here, but it should be a semicolon. Same at the
 * decompress.algo assignment further down. */
158 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
159 ts_params->def_comp_xform->compress.deflate.huffman =
160 RTE_COMP_HUFFMAN_DEFAULT;
161 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
162 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
163 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
165 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
166 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
167 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
168 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: release whatever was allocated so far. */
173 testsuite_teardown();
/*
 * Per-test setup: configure device 0 with a single queue pair,
 * set up that queue pair, and start the device.
 */
179 generic_ut_setup(void)
181 /* Configure compressdev (one device, one queue pair) */
182 struct rte_compressdev_config config = {
183 .socket_id = rte_socket_id(),
185 .max_nb_priv_xforms = NUM_MAX_XFORMS,
189 if (rte_compressdev_configure(0, &config) < 0) {
190 RTE_LOG(ERR, USER1, "Device configuration failed\n");
194 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
195 rte_socket_id()) < 0) {
196 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
200 if (rte_compressdev_start(0) < 0) {
201 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0; only logs on close failure. */
209 generic_ut_teardown(void)
211 rte_compressdev_stop(0);
212 if (rte_compressdev_close(0) < 0)
213 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: verify the configure/queue-setup APIs reject invalid
 * configurations (zero queue pairs, more queue pairs than the device
 * supports, and queue-pair setup before any queue pairs are configured).
 */
217 test_compressdev_invalid_configuration(void)
219 struct rte_compressdev_config invalid_config;
220 struct rte_compressdev_config valid_config = {
221 .socket_id = rte_socket_id(),
223 .max_nb_priv_xforms = NUM_MAX_XFORMS,
226 struct rte_compressdev_info dev_info;
228 /* Invalid configuration with 0 queue pairs */
229 memcpy(&invalid_config, &valid_config,
230 sizeof(struct rte_compressdev_config));
231 invalid_config.nb_queue_pairs = 0;
233 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
234 "Device configuration was successful "
235 "with no queue pairs (invalid)\n");
238 * Invalid configuration with too many queue pairs
239 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit", so skip the over-limit check then. */
241 rte_compressdev_info_get(0, &dev_info);
242 if (dev_info.max_nb_queue_pairs != 0) {
243 memcpy(&invalid_config, &valid_config,
244 sizeof(struct rte_compressdev_config));
245 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
247 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
248 "Device configuration was successful "
249 "with too many queue pairs (invalid)\n");
252 /* Invalid queue pair setup, with no number of queue pairs set */
253 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
254 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
255 "Queue pair setup was successful "
256 "with no queue pairs set (invalid)\n");
/*
 * Compare two buffers for equal length and content.
 * Logs the first mismatch found (length or data) at ERR level.
 */
262 compare_buffers(const char *buffer1, uint32_t buffer1_len,
263 const char *buffer2, uint32_t buffer2_len)
265 if (buffer1_len != buffer2_len) {
266 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
270 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
271 RTE_LOG(ERR, USER1, "Buffers are different\n");
279 * Maps compressdev and Zlib flush flags
/* Translate rte_comp_flush_flag values to zlib's Z_* flush constants. */
282 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
285 case RTE_COMP_FLUSH_NONE:
287 case RTE_COMP_FLUSH_SYNC:
289 case RTE_COMP_FLUSH_FULL:
291 case RTE_COMP_FLUSH_FINAL:
294 * There should be only the values above,
295 * so this should never happen
/*
 * Compress op->m_src into op->m_dst using zlib directly (reference engine,
 * bypassing any PMD).  Honors the xform's Huffman type, level and window
 * size; mem_level is forwarded to deflateInit2().  Multi-segment (SGL)
 * mbufs are first flattened into temporary contiguous buffers, and the
 * compressed output is scattered back across the destination segments.
 * On success fills op->consumed/produced/status like a PMD would.
 */
303 compress_zlib(struct rte_comp_op *op,
304 const struct rte_comp_xform *xform, int mem_level)
308 int strategy, window_bits, comp_level;
309 int ret = TEST_FAILED;
310 uint8_t *single_src_buf = NULL;
311 uint8_t *single_dst_buf = NULL;
313 /* initialize zlib stream */
314 stream.zalloc = Z_NULL;
315 stream.zfree = Z_NULL;
316 stream.opaque = Z_NULL;
/* Z_FIXED forces fixed Huffman codes to mirror RTE_COMP_HUFFMAN_FIXED. */
318 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
321 strategy = Z_DEFAULT_STRATEGY;
324 * Window bits is the base two logarithm of the window size (in bytes).
325 * When doing raw DEFLATE, this number will be negative.
327 window_bits = -(xform->compress.window_size);
329 comp_level = xform->compress.level;
331 if (comp_level != RTE_COMP_LEVEL_NONE)
332 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
333 window_bits, mem_level, strategy);
/* LEVEL_NONE means stored (uncompressed) blocks. */
335 ret = deflateInit(&stream, Z_NO_COMPRESSION);
338 printf("Zlib deflate could not be initialized\n");
342 /* Assuming stateless operation */
/* SGL source: flatten src into a contiguous buffer; dst likewise staged. */
344 if (op->m_src->nb_segs > 1) {
345 single_src_buf = rte_malloc(NULL,
346 rte_pktmbuf_pkt_len(op->m_src), 0);
347 if (single_src_buf == NULL) {
348 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
351 single_dst_buf = rte_malloc(NULL,
352 rte_pktmbuf_pkt_len(op->m_dst), 0);
353 if (single_dst_buf == NULL) {
354 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
357 if (rte_pktmbuf_read(op->m_src, 0,
358 rte_pktmbuf_pkt_len(op->m_src),
359 single_src_buf) == NULL) {
361 "Buffer could not be read entirely\n");
365 stream.avail_in = op->src.length;
366 stream.next_in = single_src_buf;
367 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
368 stream.next_out = single_dst_buf;
/* Contiguous case: operate directly on the mbuf data. */
371 stream.avail_in = op->src.length;
372 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
373 stream.avail_out = op->m_dst->data_len;
374 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
376 /* Stateless operation, all buffer will be compressed in one go */
377 zlib_flush = map_zlib_flush_flag(op->flush_flag);
378 ret = deflate(&stream, zlib_flush);
380 if (stream.avail_in != 0) {
381 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* With FLUSH_FINAL a complete stateless op must end the stream. */
385 if (ret != Z_STREAM_END)
388 /* Copy data to destination SGL */
389 if (op->m_src->nb_segs > 1) {
390 uint32_t remaining_data = stream.total_out;
391 uint8_t *src_data = single_dst_buf;
392 struct rte_mbuf *dst_buf = op->m_dst;
394 while (remaining_data > 0) {
395 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
/* Last partial segment: copy the tail and stop. */
398 if (remaining_data < dst_buf->data_len) {
399 memcpy(dst_data, src_data, remaining_data);
402 memcpy(dst_data, src_data, dst_buf->data_len);
403 remaining_data -= dst_buf->data_len;
404 src_data += dst_buf->data_len;
405 dst_buf = dst_buf->next;
/* Report results in the op exactly as a compressdev PMD would. */
410 op->consumed = stream.total_in;
411 op->produced = stream.total_out;
412 op->status = RTE_COMP_OP_STATUS_SUCCESS;
/* NOTE(review): deflateReset() return value ignored; deflateEnd() on the
 * cleanup path is presumably in elided lines — confirm against full file. */
414 deflateReset(&stream);
/* rte_free(NULL) is a no-op, so these are safe for the contiguous path. */
419 rte_free(single_src_buf);
420 rte_free(single_dst_buf);
/*
 * Decompress op->m_src into op->m_dst using zlib directly (reference
 * engine, bypassing any PMD).  Mirrors compress_zlib(): SGL mbufs are
 * flattened into temporary contiguous buffers and the inflated output is
 * scattered back across the destination segments.  On success fills
 * op->consumed/produced/status like a PMD would.
 */
426 decompress_zlib(struct rte_comp_op *op,
427 const struct rte_comp_xform *xform)
432 int ret = TEST_FAILED;
433 uint8_t *single_src_buf = NULL;
434 uint8_t *single_dst_buf = NULL;
436 /* initialize zlib stream */
437 stream.zalloc = Z_NULL;
438 stream.zfree = Z_NULL;
439 stream.opaque = Z_NULL;
442 * Window bits is the base two logarithm of the window size (in bytes).
443 * When doing raw DEFLATE, this number will be negative.
445 window_bits = -(xform->decompress.window_size);
447 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" on the inflate path — copy/paste. */
450 printf("Zlib deflate could not be initialized\n");
454 /* Assuming stateless operation */
/* SGL source: flatten src into a contiguous buffer; dst likewise staged. */
456 if (op->m_src->nb_segs > 1) {
457 single_src_buf = rte_malloc(NULL,
458 rte_pktmbuf_pkt_len(op->m_src), 0);
459 if (single_src_buf == NULL) {
460 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
463 single_dst_buf = rte_malloc(NULL,
464 rte_pktmbuf_pkt_len(op->m_dst), 0);
465 if (single_dst_buf == NULL) {
466 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
469 if (rte_pktmbuf_read(op->m_src, 0,
470 rte_pktmbuf_pkt_len(op->m_src),
471 single_src_buf) == NULL) {
473 "Buffer could not be read entirely\n");
477 stream.avail_in = op->src.length;
478 stream.next_in = single_src_buf;
479 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
480 stream.next_out = single_dst_buf;
/* Contiguous case: operate directly on the mbuf data. */
483 stream.avail_in = op->src.length;
484 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
485 stream.avail_out = op->m_dst->data_len;
486 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
489 /* Stateless operation, all buffer will be compressed in one go */
490 zlib_flush = map_zlib_flush_flag(op->flush_flag);
491 ret = inflate(&stream, zlib_flush);
493 if (stream.avail_in != 0) {
494 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* A complete stateless op must end the stream. */
498 if (ret != Z_STREAM_END)
/* Scatter the inflated data back into the destination SGL. */
501 if (op->m_src->nb_segs > 1) {
502 uint32_t remaining_data = stream.total_out;
503 uint8_t *src_data = single_dst_buf;
504 struct rte_mbuf *dst_buf = op->m_dst;
506 while (remaining_data > 0) {
507 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
/* Last partial segment: copy the tail and stop. */
510 if (remaining_data < dst_buf->data_len) {
511 memcpy(dst_data, src_data, remaining_data);
514 memcpy(dst_data, src_data, dst_buf->data_len);
515 remaining_data -= dst_buf->data_len;
516 src_data += dst_buf->data_len;
517 dst_buf = dst_buf->next;
/* Report results in the op exactly as a compressdev PMD would. */
522 op->consumed = stream.total_in;
523 op->produced = stream.total_out;
524 op->status = RTE_COMP_OP_STATUS_SUCCESS;
526 inflateReset(&stream);
/*
 * Build a scatter-gather (chained) mbuf of total_data_size bytes on top of
 * head_buf, splitting the data into SMALL_SEG_SIZE segments from
 * small_mbuf_pool (the final segment may come from large_mbuf_pool when the
 * remaining data exceeds SMALL_SEG_SIZE because the segment count was
 * capped).  If test_buf is non-NULL its bytes are copied into the segments;
 * if NULL, only space is reserved (for destination buffers).
 * limit_segs_in_sgl == 0 means "no limit" on the number of segments.
 */
536 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
537 uint32_t total_data_size,
538 struct rte_mempool *small_mbuf_pool,
539 struct rte_mempool *large_mbuf_pool,
540 uint8_t limit_segs_in_sgl)
542 uint32_t remaining_data = total_data_size;
543 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
544 struct rte_mempool *pool;
545 struct rte_mbuf *next_seg;
548 const char *data_ptr = test_buf;
/* Cap the segment count; "- 1" reserves one slot for the head segment. */
552 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
553 num_remaining_segs = limit_segs_in_sgl - 1;
556 * Allocate data in the first segment (header) and
557 * copy data if test buffer is provided
559 if (remaining_data < SMALL_SEG_SIZE)
560 data_size = remaining_data;
562 data_size = SMALL_SEG_SIZE;
563 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
564 if (buf_ptr == NULL) {
566 "Not enough space in the 1st buffer\n");
570 if (data_ptr != NULL) {
571 /* Copy characters without NULL terminator */
/* strncpy is intentional here: exact data_size bytes, no NUL required. */
572 strncpy(buf_ptr, data_ptr, data_size);
573 data_ptr += data_size;
575 remaining_data -= data_size;
576 num_remaining_segs--;
579 * Allocate the rest of the segments,
580 * copy the rest of the data and chain the segments.
582 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment absorbs all remaining data; pick a pool large enough. */
584 if (i == (num_remaining_segs - 1)) {
586 if (remaining_data > SMALL_SEG_SIZE)
587 pool = large_mbuf_pool;
589 pool = small_mbuf_pool;
590 data_size = remaining_data;
592 data_size = SMALL_SEG_SIZE;
593 pool = small_mbuf_pool;
596 next_seg = rte_pktmbuf_alloc(pool);
597 if (next_seg == NULL) {
599 "New segment could not be allocated "
600 "from the mempool\n");
603 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
604 if (buf_ptr == NULL) {
606 "Not enough space in the buffer\n");
607 rte_pktmbuf_free(next_seg);
610 if (data_ptr != NULL) {
611 /* Copy characters without NULL terminator */
612 strncpy(buf_ptr, data_ptr, data_size);
613 data_ptr += data_size;
615 remaining_data -= data_size;
/* Chaining transfers ownership of next_seg to head_buf on success. */
617 ret = rte_pktmbuf_chain(head_buf, next_seg);
619 rte_pktmbuf_free(next_seg);
621 "Segment could not chained\n");
630 * Compresses and decompresses buffer with compressdev API and Zlib API
/*
 * Core round-trip driver used by all the deflate tests.  Each of the
 * num_bufs test strings is compressed and then decompressed; zlib_dir
 * selects which stage(s) run through zlib directly instead of the PMD
 * (ZLIB_COMPRESS / ZLIB_DECOMPRESS / ZLIB_ALL / neither).  sgl selects
 * chained (SGL) vs contiguous mbufs; xforms are applied round-robin
 * (buffer i uses xform i % num_xforms).  Ops may be dequeued out of
 * order, so the original index is carried in per-op private data and the
 * final comparison is done against test_bufs[orig_idx].  Returns
 * TEST_SUCCESS/TEST_FAILED via the (elided) cleanup path that frees all
 * mbufs, ops and private xforms.
 */
633 test_deflate_comp_decomp(const char * const test_bufs[],
634 unsigned int num_bufs,
636 struct rte_comp_xform *compress_xforms[],
637 struct rte_comp_xform *decompress_xforms[],
638 unsigned int num_xforms,
639 enum rte_comp_op_type state,
641 enum zlib_direction zlib_dir)
643 struct comp_testsuite_params *ts_params = &testsuite_params;
646 struct rte_mbuf *uncomp_bufs[num_bufs];
647 struct rte_mbuf *comp_bufs[num_bufs];
648 struct rte_comp_op *ops[num_bufs];
649 struct rte_comp_op *ops_processed[num_bufs];
650 void *priv_xforms[num_bufs];
651 uint16_t num_enqd, num_deqd, num_total_deqd;
652 uint16_t num_priv_xforms = 0;
653 unsigned int deqd_retries = 0;
654 struct priv_op_data *priv_data;
657 struct rte_mempool *buf_pool;
659 const struct rte_compressdev_capabilities *capa =
660 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
661 char *contig_buf = NULL;
663 /* Initialize all arrays to NULL */
/* NULL-init so the common cleanup path can free unconditionally. */
664 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
665 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
666 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
667 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
668 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* SGL tests draw head mbufs from the small-segment pool. */
671 buf_pool = ts_params->small_mbuf_pool;
673 buf_pool = ts_params->large_mbuf_pool;
675 /* Prepare the source mbufs with the data */
676 ret = rte_pktmbuf_alloc_bulk(buf_pool,
677 uncomp_bufs, num_bufs);
680 "Source mbufs could not be allocated "
681 "from the mempool\n");
/* SGL path: chain segments and copy the test string in (+1 for NUL). */
686 for (i = 0; i < num_bufs; i++) {
687 data_size = strlen(test_bufs[i]) + 1;
688 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
690 ts_params->small_mbuf_pool,
691 ts_params->large_mbuf_pool,
/* Contiguous path: append and copy directly. */
696 for (i = 0; i < num_bufs; i++) {
697 data_size = strlen(test_bufs[i]) + 1;
698 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
/* NOTE(review): buf_ptr not checked for NULL before snprintf — the pool
 * is sized so append should succeed, but a check would be safer. */
699 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
703 /* Prepare the destination mbufs */
704 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
707 "Destination mbufs could not be allocated "
708 "from the mempool\n");
/* Destination sized with 30% headroom for incompressible data. */
713 for (i = 0; i < num_bufs; i++) {
714 data_size = strlen(test_bufs[i]) *
715 COMPRESS_BUF_SIZE_RATIO;
716 if (prepare_sgl_bufs(NULL, comp_bufs[i],
718 ts_params->small_mbuf_pool,
719 ts_params->large_mbuf_pool,
725 for (i = 0; i < num_bufs; i++) {
726 data_size = strlen(test_bufs[i]) *
727 COMPRESS_BUF_SIZE_RATIO;
728 rte_pktmbuf_append(comp_bufs[i], data_size);
732 /* Build the compression operations */
733 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
736 "Compress operations could not be allocated "
737 "from the mempool\n");
742 for (i = 0; i < num_bufs; i++) {
743 ops[i]->m_src = uncomp_bufs[i];
744 ops[i]->m_dst = comp_bufs[i];
745 ops[i]->src.offset = 0;
746 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
747 ops[i]->dst.offset = 0;
748 if (state == RTE_COMP_OP_STATELESS) {
749 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
752 "Stateful operations are not supported "
753 "in these tests yet\n");
756 ops[i]->input_chksum = 0;
758 * Store original operation index in private data,
759 * since ordering does not have to be maintained,
760 * when dequeueing from compressdev, so a comparison
761 * at the end of the test can be done.
/* Private area lives immediately after the op structure. */
763 priv_data = (struct priv_op_data *) (ops[i] + 1);
764 priv_data->orig_idx = i;
767 /* Compress data (either with Zlib API or compressdev API */
768 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
769 for (i = 0; i < num_bufs; i++) {
770 const struct rte_comp_xform *compress_xform =
771 compress_xforms[i % num_xforms];
772 ret = compress_zlib(ops[i], compress_xform,
/* zlib runs synchronously, so ops are "processed" immediately, in order. */
777 ops_processed[i] = ops[i];
780 /* Create compress private xform data */
781 for (i = 0; i < num_xforms; i++) {
782 ret = rte_compressdev_private_xform_create(0,
783 (const struct rte_comp_xform *)compress_xforms[i],
787 "Compression private xform "
788 "could not be created\n");
/* Shareable xforms can be attached to many ops; otherwise one per op. */
794 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
795 /* Attach shareable private xform data to ops */
796 for (i = 0; i < num_bufs; i++)
797 ops[i]->private_xform = priv_xforms[i % num_xforms];
799 /* Create rest of the private xforms for the other ops */
800 for (i = num_xforms; i < num_bufs; i++) {
801 ret = rte_compressdev_private_xform_create(0,
802 compress_xforms[i % num_xforms],
806 "Compression private xform "
807 "could not be created\n");
813 /* Attach non shareable private xform data to ops */
814 for (i = 0; i < num_bufs; i++)
815 ops[i]->private_xform = priv_xforms[i];
818 /* Enqueue and dequeue all operations */
819 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
820 if (num_enqd < num_bufs) {
822 "The operations could not be enqueued\n");
829 * If retrying a dequeue call, wait for 10 ms to allow
830 * enough time to the driver to process the operations
832 if (deqd_retries != 0) {
834 * Avoid infinite loop if not all the
835 * operations get out of the device
837 if (deqd_retries == MAX_DEQD_RETRIES) {
839 "Not all operations could be "
843 usleep(DEQUEUE_WAIT_TIME);
845 num_deqd = rte_compressdev_dequeue_burst(0, 0,
846 &ops_processed[num_total_deqd], num_bufs);
847 num_total_deqd += num_deqd;
849 } while (num_total_deqd < num_enqd);
853 /* Free compress private xforms */
854 for (i = 0; i < num_priv_xforms; i++) {
855 rte_compressdev_private_xform_free(0, priv_xforms[i]);
856 priv_xforms[i] = NULL;
861 for (i = 0; i < num_bufs; i++) {
862 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
863 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
864 const struct rte_comp_compress_xform *compress_xform =
865 &compress_xforms[xform_idx]->compress;
866 enum rte_comp_huffman huffman_type =
867 compress_xform->deflate.huffman;
868 char engine[] = "zlib (directly, not PMD)";
/* NOTE(review): BUG — "!= A || != B" is always true, so this always prints
 * "PMD" even when zlib compressed.  Should be "&&" (fixed upstream). */
869 if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
870 strlcpy(engine, "PMD", sizeof(engine));
872 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
873 " %u bytes (level = %d, huffman = %s)\n",
874 buf_idx[priv_data->orig_idx], engine,
875 ops_processed[i]->consumed, ops_processed[i]->produced,
876 compress_xform->level,
877 huffman_type_strings[huffman_type]);
878 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
879 ops_processed[i]->consumed == 0 ? 0 :
880 (float)ops_processed[i]->produced /
881 ops_processed[i]->consumed * 100);
886 * Check operation status and free source mbufs (destination mbuf and
887 * compress operation information is needed for the decompression stage)
889 for (i = 0; i < num_bufs; i++) {
890 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
892 "Some operations were not successful\n");
895 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
896 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
897 uncomp_bufs[priv_data->orig_idx] = NULL;
900 /* Allocate buffers for decompressed data */
901 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
904 "Destination mbufs could not be allocated "
905 "from the mempool\n");
/* Decompressed size is known: the original string length (+ NUL). */
910 for (i = 0; i < num_bufs; i++) {
911 priv_data = (struct priv_op_data *)
912 (ops_processed[i] + 1);
913 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
914 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
916 ts_params->small_mbuf_pool,
917 ts_params->large_mbuf_pool,
923 for (i = 0; i < num_bufs; i++) {
924 priv_data = (struct priv_op_data *)
925 (ops_processed[i] + 1);
926 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
927 rte_pktmbuf_append(uncomp_bufs[i], data_size);
931 /* Build the decompression operations */
932 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
935 "Decompress operations could not be allocated "
936 "from the mempool\n");
940 /* Source buffer is the compressed data from the previous operations */
941 for (i = 0; i < num_bufs; i++) {
942 ops[i]->m_src = ops_processed[i]->m_dst;
943 ops[i]->m_dst = uncomp_bufs[i];
944 ops[i]->src.offset = 0;
946 * Set the length of the compressed data to the
947 * number of bytes that were produced in the previous stage
949 ops[i]->src.length = ops_processed[i]->produced;
950 ops[i]->dst.offset = 0;
951 if (state == RTE_COMP_OP_STATELESS) {
952 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
955 "Stateful operations are not supported "
956 "in these tests yet\n");
959 ops[i]->input_chksum = 0;
961 * Copy private data from previous operations,
962 * to keep the pointer to the original buffer
964 memcpy(ops[i] + 1, ops_processed[i] + 1,
965 sizeof(struct priv_op_data));
969 * Free the previous compress operations,
970 * as they are not needed anymore
972 rte_comp_op_bulk_free(ops_processed, num_bufs);
974 /* Decompress data (either with Zlib API or compressdev API */
975 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
976 for (i = 0; i < num_bufs; i++) {
977 priv_data = (struct priv_op_data *)(ops[i] + 1);
978 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
979 const struct rte_comp_xform *decompress_xform =
980 decompress_xforms[xform_idx];
982 ret = decompress_zlib(ops[i], decompress_xform);
986 ops_processed[i] = ops[i];
989 /* Create decompress private xform data */
990 for (i = 0; i < num_xforms; i++) {
991 ret = rte_compressdev_private_xform_create(0,
992 (const struct rte_comp_xform *)decompress_xforms[i],
996 "Decompression private xform "
997 "could not be created\n");
1003 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1004 /* Attach shareable private xform data to ops */
1005 for (i = 0; i < num_bufs; i++) {
1006 priv_data = (struct priv_op_data *)(ops[i] + 1);
1007 uint16_t xform_idx = priv_data->orig_idx %
1009 ops[i]->private_xform = priv_xforms[xform_idx];
1012 /* Create rest of the private xforms for the other ops */
1013 for (i = num_xforms; i < num_bufs; i++) {
1014 ret = rte_compressdev_private_xform_create(0,
1015 decompress_xforms[i % num_xforms],
1019 "Decompression private xform "
1020 "could not be created\n");
1026 /* Attach non shareable private xform data to ops */
1027 for (i = 0; i < num_bufs; i++) {
1028 priv_data = (struct priv_op_data *)(ops[i] + 1);
1029 uint16_t xform_idx = priv_data->orig_idx;
1030 ops[i]->private_xform = priv_xforms[xform_idx];
1034 /* Enqueue and dequeue all operations */
1035 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1036 if (num_enqd < num_bufs) {
1038 "The operations could not be enqueued\n");
1045 * If retrying a dequeue call, wait for 10 ms to allow
1046 * enough time to the driver to process the operations
1048 if (deqd_retries != 0) {
1050 * Avoid infinite loop if not all the
1051 * operations get out of the device
1053 if (deqd_retries == MAX_DEQD_RETRIES) {
1055 "Not all operations could be "
1059 usleep(DEQUEUE_WAIT_TIME);
1061 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1062 &ops_processed[num_total_deqd], num_bufs);
1063 num_total_deqd += num_deqd;
1065 } while (num_total_deqd < num_enqd);
1070 for (i = 0; i < num_bufs; i++) {
1071 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1072 char engine[] = "zlib, (directly, no PMD)";
/* NOTE(review): BUG — same always-true "||" as in the compress stage;
 * should be "&&" (fixed upstream). */
1073 if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1074 strlcpy(engine, "pmd", sizeof(engine));
1075 RTE_LOG(DEBUG, USER1,
1076 "Buffer %u decompressed by %s from %u to %u bytes\n",
1077 buf_idx[priv_data->orig_idx], engine,
1078 ops_processed[i]->consumed, ops_processed[i]->produced);
1083 * Check operation status and free source mbuf (destination mbuf and
1084 * compress operation information is still needed)
1086 for (i = 0; i < num_bufs; i++) {
1087 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1089 "Some operations were not successful\n");
1092 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1093 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1094 comp_bufs[priv_data->orig_idx] = NULL;
1098 * Compare the original stream with the decompressed stream
1099 * (in size and the data)
1101 for (i = 0; i < num_bufs; i++) {
1102 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1103 const char *buf1 = test_bufs[priv_data->orig_idx];
/* Flatten the (possibly chained) output so memcmp can be used. */
1105 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1106 if (contig_buf == NULL) {
1107 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1112 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1113 ops_processed[i]->produced, contig_buf);
1115 if (compare_buffers(buf1, strlen(buf1) + 1,
1116 buf2, ops_processed[i]->produced) < 0)
1119 rte_free(contig_buf);
1126 /* Free resources */
1127 for (i = 0; i < num_bufs; i++) {
1128 rte_pktmbuf_free(uncomp_bufs[i]);
1129 rte_pktmbuf_free(comp_bufs[i]);
1130 rte_comp_op_free(ops[i]);
1131 rte_comp_op_free(ops_processed[i]);
1133 for (i = 0; i < num_priv_xforms; i++) {
1134 if (priv_xforms[i] != NULL)
1135 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1137 rte_free(contig_buf);
/*
 * Stateless round-trip with fixed Huffman codes: each test buffer is run
 * PMD-compress/zlib-decompress and zlib-compress/PMD-decompress.
 * Skipped when the device does not advertise RTE_COMP_FF_HUFFMAN_FIXED.
 */
1143 test_compressdev_deflate_stateless_fixed(void)
1145 struct comp_testsuite_params *ts_params = &testsuite_params;
1146 const char *test_buffer;
1149 const struct rte_compressdev_capabilities *capab;
1151 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1152 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1154 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1157 struct rte_comp_xform *compress_xform =
1158 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1160 if (compress_xform == NULL) {
1162 "Compress xform could not be created\n");
/* Clone the suite default xform and force fixed Huffman coding. */
1167 memcpy(compress_xform, ts_params->def_comp_xform,
1168 sizeof(struct rte_comp_xform));
1169 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1171 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1172 test_buffer = compress_test_bufs[i];
1174 /* Compress with compressdev, decompress with Zlib */
1175 if (test_deflate_comp_decomp(&test_buffer, 1,
1178 &ts_params->def_decomp_xform,
1180 RTE_COMP_OP_STATELESS,
1182 ZLIB_DECOMPRESS) < 0) {
1187 /* Compress with Zlib, decompress with compressdev */
1188 if (test_deflate_comp_decomp(&test_buffer, 1,
1191 &ts_params->def_decomp_xform,
1193 RTE_COMP_OP_STATELESS,
1195 ZLIB_COMPRESS) < 0) {
1204 rte_free(compress_xform);
/*
 * Stateless round-trip with dynamic Huffman codes; mirrors the fixed-
 * Huffman test.  Skipped when RTE_COMP_FF_HUFFMAN_DYNAMIC is absent.
 */
1209 test_compressdev_deflate_stateless_dynamic(void)
1211 struct comp_testsuite_params *ts_params = &testsuite_params;
1212 const char *test_buffer;
/* NOTE(review): xform is allocated before the capability check below, so
 * the skip path may leak it unless an elided line frees it — confirm
 * against the full file. */
1215 struct rte_comp_xform *compress_xform =
1216 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1218 const struct rte_compressdev_capabilities *capab;
1220 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1221 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1223 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1226 if (compress_xform == NULL) {
1228 "Compress xform could not be created\n");
/* Clone the suite default xform and force dynamic Huffman coding. */
1233 memcpy(compress_xform, ts_params->def_comp_xform,
1234 sizeof(struct rte_comp_xform));
1235 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1237 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1238 test_buffer = compress_test_bufs[i];
1240 /* Compress with compressdev, decompress with Zlib */
1241 if (test_deflate_comp_decomp(&test_buffer, 1,
1244 &ts_params->def_decomp_xform,
1246 RTE_COMP_OP_STATELESS,
1248 ZLIB_DECOMPRESS) < 0) {
1253 /* Compress with Zlib, decompress with compressdev */
1254 if (test_deflate_comp_decomp(&test_buffer, 1,
1257 &ts_params->def_decomp_xform,
1259 RTE_COMP_OP_STATELESS,
1261 ZLIB_COMPRESS) < 0) {
1270 rte_free(compress_xform);
/*
 * Stateless round-trip with all test buffers submitted in a single burst,
 * exercising multi-op enqueue/dequeue and out-of-order completion handling.
 */
1275 test_compressdev_deflate_stateless_multi_op(void)
1277 struct comp_testsuite_params *ts_params = &testsuite_params;
1278 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1279 uint16_t buf_idx[num_bufs];
1282 for (i = 0; i < num_bufs; i++)
1285 /* Compress with compressdev, decompress with Zlib */
1286 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1288 &ts_params->def_comp_xform,
1289 &ts_params->def_decomp_xform,
1291 RTE_COMP_OP_STATELESS,
1293 ZLIB_DECOMPRESS) < 0)
1296 /* Compress with Zlib, decompress with compressdev */
1297 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1299 &ts_params->def_comp_xform,
1300 &ts_params->def_decomp_xform,
1302 RTE_COMP_OP_STATELESS,
1307 return TEST_SUCCESS;
/*
 * Stateless round-trip sweeping every compression level from
 * RTE_COMP_LEVEL_MIN to RTE_COMP_LEVEL_MAX for each test buffer
 * (PMD compress, zlib decompress).
 */
1311 test_compressdev_deflate_stateless_multi_level(void)
1313 struct comp_testsuite_params *ts_params = &testsuite_params;
1314 const char *test_buffer;
1318 struct rte_comp_xform *compress_xform =
1319 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1321 if (compress_xform == NULL) {
1323 "Compress xform could not be created\n");
/* Start from the suite default; only the level changes per iteration. */
1328 memcpy(compress_xform, ts_params->def_comp_xform,
1329 sizeof(struct rte_comp_xform));
1331 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1332 test_buffer = compress_test_bufs[i];
1333 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1335 compress_xform->compress.level = level;
1336 /* Compress with compressdev, decompress with Zlib */
1337 if (test_deflate_comp_decomp(&test_buffer, 1,
1340 &ts_params->def_decomp_xform,
1342 RTE_COMP_OP_STATELESS,
1344 ZLIB_DECOMPRESS) < 0) {
1354 rte_free(compress_xform);
/* Number of distinct xforms (and ops) used by the multi-xform test. */
1358 #define NUM_XFORMS 3
/*
 * Stateless round-trip where NUM_XFORMS ops each carry a different
 * compress xform (varying level) over the same input buffer, verifying
 * per-op private-xform handling in a single burst.
 */
1360 test_compressdev_deflate_stateless_multi_xform(void)
1362 struct comp_testsuite_params *ts_params = &testsuite_params;
1363 uint16_t num_bufs = NUM_XFORMS;
1364 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1365 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1366 const char *test_buffers[NUM_XFORMS];
1368 unsigned int level = RTE_COMP_LEVEL_MIN;
1369 uint16_t buf_idx[num_bufs];
1373 /* Create multiple xforms with various levels */
1374 for (i = 0; i < NUM_XFORMS; i++) {
1375 compress_xforms[i] = rte_malloc(NULL,
1376 sizeof(struct rte_comp_xform), 0);
1377 if (compress_xforms[i] == NULL) {
1379 "Compress xform could not be created\n");
1384 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1385 sizeof(struct rte_comp_xform));
1386 compress_xforms[i]->compress.level = level;
1389 decompress_xforms[i] = rte_malloc(NULL,
1390 sizeof(struct rte_comp_xform), 0);
1391 if (decompress_xforms[i] == NULL) {
1393 "Decompress xform could not be created\n");
1398 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1399 sizeof(struct rte_comp_xform));
1402 for (i = 0; i < NUM_XFORMS; i++) {
1404 /* Use the same buffer in all sessions */
1405 test_buffers[i] = compress_test_bufs[0];
1407 /* Compress with compressdev, decompress with Zlib */
1408 if (test_deflate_comp_decomp(test_buffers, num_bufs,
1413 RTE_COMP_OP_STATELESS,
1415 ZLIB_DECOMPRESS) < 0) {
/* Common exit: free all xforms allocated above (NULL-safe). */
1422 for (i = 0; i < NUM_XFORMS; i++) {
1423 rte_free(compress_xforms[i]);
1424 rte_free(decompress_xforms[i]);
/*
 * Stateless round-trip using scatter-gather (chained) mbufs for both
 * source and destination.  Skipped when the device does not support
 * out-of-place SGL-in/SGL-out (RTE_COMP_FF_OOP_SGL_IN_SGL_OUT).
 */
1431 test_compressdev_deflate_stateless_sgl(void)
1433 struct comp_testsuite_params *ts_params = &testsuite_params;
1435 const char *test_buffer;
1436 const struct rte_compressdev_capabilities *capab;
1438 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1439 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1441 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1444 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1445 test_buffer = compress_test_bufs[i];
1446 /* Compress with compressdev, decompress with Zlib */
1447 if (test_deflate_comp_decomp(&test_buffer, 1,
1449 &ts_params->def_comp_xform,
1450 &ts_params->def_decomp_xform,
1452 RTE_COMP_OP_STATELESS,
1454 ZLIB_DECOMPRESS) < 0)
1457 /* Compress with Zlib, decompress with compressdev */
1458 if (test_deflate_comp_decomp(&test_buffer, 1,
1460 &ts_params->def_comp_xform,
1461 &ts_params->def_decomp_xform,
1463 RTE_COMP_OP_STATELESS,
1469 return TEST_SUCCESS;
/*
 * Test-suite descriptor: suite-level setup/teardown plus the ordered list
 * of test cases.  All cases except the invalid-configuration one use the
 * generic per-test device setup/teardown pair.
 */
1472 static struct unit_test_suite compressdev_testsuite = {
1473 .suite_name = "compressdev unit test suite",
1474 .setup = testsuite_setup,
1475 .teardown = testsuite_teardown,
1476 .unit_test_cases = {
1477 TEST_CASE_ST(NULL, NULL,
1478 test_compressdev_invalid_configuration),
1479 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1480 test_compressdev_deflate_stateless_fixed),
1481 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1482 test_compressdev_deflate_stateless_dynamic),
1483 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1484 test_compressdev_deflate_stateless_multi_op),
1485 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1486 test_compressdev_deflate_stateless_multi_level),
1487 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1488 test_compressdev_deflate_stateless_multi_xform),
1489 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1490 test_compressdev_deflate_stateless_sgl),
1491 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point: run the whole suite; registered as "compressdev_autotest". */
1496 test_compressdev(void)
1498 return unit_test_suite_runner(&compressdev_testsuite);
1501 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);