1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
12 #include <rte_compressdev.h>
14 #include "test_compressdev_test_buffer.h"
/* Ceiling integer division.
 * NOTE(review): both arguments are evaluated more than once — do not pass
 * expressions with side effects. */
17 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
/* Deflate window size as log2 of bytes (32 KB window) — matches zlib's default. */
19 #define DEFAULT_WINDOW_SIZE 15
/* zlib memLevel passed to deflateInit2(). */
20 #define DEFAULT_MEM_LEVEL 8
/* Give up after this many empty dequeue retries (guards against PMD stalls). */
21 #define MAX_DEQD_RETRIES 10
/* Microseconds slept between dequeue retries (10 ms). */
22 #define DEQUEUE_WAIT_TIME 10000
25 * 30% extra size for compressed data compared to original data,
26 * in case data size cannot be reduced and it is actually bigger
27 * due to the compress block headers
29 #define COMPRESS_BUF_SIZE_RATIO 1.3
30 #define NUM_LARGE_MBUFS 16
/* Upper bounds used when configuring the compressdev device. */
33 #define NUM_MAX_XFORMS 16
34 #define NUM_MAX_INFLIGHT_OPS 128
/* Human-readable names for enum rte_comp_huffman, indexed by enum value;
 * used in the debug log of test_deflate_comp_decomp(). */
38 huffman_type_strings[] = {
39 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
40 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
41 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Shared fixtures created once in testsuite_setup() and released in
 * testsuite_teardown(); every test case reads them via testsuite_params. */
55 struct comp_testsuite_params {
/* Pool of mbufs large enough to hold any whole test buffer (+30%). */
56 struct rte_mempool *large_mbuf_pool;
/* Pool of SEG_SIZE mbufs used to build scatter-gather chains. */
57 struct rte_mempool *small_mbuf_pool;
/* Pool of rte_comp_op with room for struct priv_op_data as user data. */
58 struct rte_mempool *op_pool;
/* Default DEFLATE compress/decompress transforms cloned by the tests. */
59 struct rte_comp_xform *def_comp_xform;
60 struct rte_comp_xform *def_decomp_xform;
/* Single global instance; zero-initialized so teardown is safe before setup. */
63 static struct comp_testsuite_params testsuite_params = { 0 };
/* Release every fixture owned by testsuite_params.
 * rte_mempool_free()/rte_free() accept NULL, so this is safe even if
 * testsuite_setup() failed part-way through. */
66 testsuite_teardown(void)
68 struct comp_testsuite_params *ts_params = &testsuite_params;
70 rte_mempool_free(ts_params->large_mbuf_pool);
71 rte_mempool_free(ts_params->small_mbuf_pool);
72 rte_mempool_free(ts_params->op_pool);
73 rte_free(ts_params->def_comp_xform);
74 rte_free(ts_params->def_decomp_xform);
/* One-time suite setup: requires at least one compressdev, sizes the mbuf
 * pools from the largest test buffer, creates the op pool, and fills in the
 * default compress/decompress DEFLATE xforms.  On any failure it falls
 * through to testsuite_teardown() (see bottom of function). */
80 struct comp_testsuite_params *ts_params = &testsuite_params;
81 uint32_t max_buf_size = 0;
84 if (rte_compressdev_count() == 0) {
85 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
89 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
90 rte_compressdev_name_get(0));
/* Find the longest test string; +1 keeps the NUL terminator in the buffer. */
92 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
93 max_buf_size = RTE_MAX(max_buf_size,
94 strlen(compress_test_bufs[i]) + 1);
97 * Buffers to be used in compression and decompression.
98 * Since decompressed data might be larger than
99 * compressed data (due to block header),
100 * buffers should be big enough for both cases.
/* NOTE(review): float macro (1.3) applied to an integer — result truncates. */
102 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
103 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
106 max_buf_size + RTE_PKTMBUF_HEADROOM,
108 if (ts_params->large_mbuf_pool == NULL) {
109 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
113 /* Create mempool with smaller buffers for SGL testing */
114 uint16_t max_segs_per_buf = DIV_CEIL(max_buf_size, SEG_SIZE);
116 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
117 NUM_LARGE_MBUFS * max_segs_per_buf,
119 SEG_SIZE + RTE_PKTMBUF_HEADROOM,
121 if (ts_params->small_mbuf_pool == NULL) {
122 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* priv size = sizeof(struct priv_op_data) so each op carries its orig index. */
126 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
127 0, sizeof(struct priv_op_data),
129 if (ts_params->op_pool == NULL) {
130 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
134 ts_params->def_comp_xform =
135 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
136 if (ts_params->def_comp_xform == NULL) {
138 "Default compress xform could not be created\n");
141 ts_params->def_decomp_xform =
142 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
143 if (ts_params->def_decomp_xform == NULL) {
145 "Default decompress xform could not be created\n");
149 /* Initializes default values for compress/decompress xforms */
150 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing ',' (comma operator) instead of ';' — the next
 * assignment becomes the right operand of the comma expression.  Behavior
 * is unchanged, but this should be a semicolon. */
151 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
152 ts_params->def_comp_xform->compress.deflate.huffman =
153 RTE_COMP_HUFFMAN_DEFAULT;
154 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
155 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
156 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
158 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same comma-for-semicolon slip as line 151 above. */
159 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
160 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
161 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: undo whatever was allocated so far. */
166 testsuite_teardown();
/* Per-test setup: configure device 0 with one queue pair, set it up with
 * NUM_MAX_INFLIGHT_OPS descriptors on the local socket, and start it. */
172 generic_ut_setup(void)
174 /* Configure compressdev (one device, one queue pair) */
175 struct rte_compressdev_config config = {
176 .socket_id = rte_socket_id(),
178 .max_nb_priv_xforms = NUM_MAX_XFORMS,
182 if (rte_compressdev_configure(0, &config) < 0) {
183 RTE_LOG(ERR, USER1, "Device configuration failed\n");
187 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
188 rte_socket_id()) < 0) {
189 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
193 if (rte_compressdev_start(0) < 0) {
194 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop and close device 0 (mirror of generic_ut_setup). */
202 generic_ut_teardown(void)
204 rte_compressdev_stop(0);
205 if (rte_compressdev_close(0) < 0)
206 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/* Negative test: every rte_compressdev_configure()/queue_pair_setup() call
 * below is expected to FAIL; TEST_ASSERT_FAIL flags success as an error. */
210 test_compressdev_invalid_configuration(void)
212 struct rte_compressdev_config invalid_config;
213 struct rte_compressdev_config valid_config = {
214 .socket_id = rte_socket_id(),
216 .max_nb_priv_xforms = NUM_MAX_XFORMS,
219 struct rte_compressdev_info dev_info;
221 /* Invalid configuration with 0 queue pairs */
222 memcpy(&invalid_config, &valid_config,
223 sizeof(struct rte_compressdev_config));
224 invalid_config.nb_queue_pairs = 0;
226 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
227 "Device configuration was successful "
228 "with no queue pairs (invalid)\n");
231 * Invalid configuration with too many queue pairs
232 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit", so skip the over-limit case. */
234 rte_compressdev_info_get(0, &dev_info);
235 if (dev_info.max_nb_queue_pairs != 0) {
236 memcpy(&invalid_config, &valid_config,
237 sizeof(struct rte_compressdev_config));
238 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
240 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
241 "Device configuration was successful "
242 "with too many queue pairs (invalid)\n");
245 /* Invalid queue pair setup, with no number of queue pairs set */
246 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
247 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
248 "Queue pair setup was successful "
249 "with no queue pairs set (invalid)\n");
/* Compare two buffers by length first, then content.
 * Logs and returns an error (negative, per callers' "< 0" checks) on any
 * mismatch; memcmp is only reached when the lengths are equal. */
255 compare_buffers(const char *buffer1, uint32_t buffer1_len,
256 const char *buffer2, uint32_t buffer2_len)
258 if (buffer1_len != buffer2_len) {
259 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
263 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
264 RTE_LOG(ERR, USER1, "Buffers are different\n");
272 * Maps compressdev and Zlib flush flags
/* Translate enum rte_comp_flush_flag to the corresponding zlib flush value
 * (Z_NO_FLUSH / Z_SYNC_FLUSH / Z_FULL_FLUSH / Z_FINISH — return lines not
 * visible in this extract). */
275 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
278 case RTE_COMP_FLUSH_NONE:
280 case RTE_COMP_FLUSH_SYNC:
282 case RTE_COMP_FLUSH_FULL:
284 case RTE_COMP_FLUSH_FINAL:
287 * There should be only the values above,
288 * so this should never happen
/* Reference compression path: run the op through zlib's deflate instead of
 * the PMD, so the device's output can be validated independently.
 * Fills op->consumed/produced/status on success.  SGL sources are first
 * linearized into single_src_buf/single_dst_buf, then the deflated output is
 * copied back into the destination mbuf chain. */
296 compress_zlib(struct rte_comp_op *op,
297 const struct rte_comp_xform *xform, int mem_level)
301 int strategy, window_bits, comp_level;
302 int ret = TEST_FAILED;
303 uint8_t *single_src_buf = NULL;
304 uint8_t *single_dst_buf = NULL;
306 /* initialize zlib stream */
307 stream.zalloc = Z_NULL;
308 stream.zfree = Z_NULL;
309 stream.opaque = Z_NULL;
/* Fixed Huffman maps to Z_FIXED (line not visible); otherwise default. */
311 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
314 strategy = Z_DEFAULT_STRATEGY;
317 * Window bits is the base two logarithm of the window size (in bytes).
318 * When doing raw DEFLATE, this number will be negative.
320 window_bits = -(xform->compress.window_size);
322 comp_level = xform->compress.level;
324 if (comp_level != RTE_COMP_LEVEL_NONE)
325 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
326 window_bits, mem_level, strategy);
328 ret = deflateInit(&stream, Z_NO_COMPRESSION);
331 printf("Zlib deflate could not be initialized\n");
335 /* Assuming stateless operation */
/* Multi-segment source: linearize into temporary contiguous buffers. */
337 if (op->m_src->nb_segs > 1) {
338 single_src_buf = rte_malloc(NULL,
339 rte_pktmbuf_pkt_len(op->m_src), 0);
340 if (single_src_buf == NULL) {
341 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
344 single_dst_buf = rte_malloc(NULL,
345 rte_pktmbuf_pkt_len(op->m_dst), 0);
346 if (single_dst_buf == NULL) {
347 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
350 if (rte_pktmbuf_read(op->m_src, 0,
351 rte_pktmbuf_pkt_len(op->m_src),
352 single_src_buf) == NULL) {
354 "Buffer could not be read entirely\n");
358 stream.avail_in = op->src.length;
359 stream.next_in = single_src_buf;
360 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
361 stream.next_out = single_dst_buf;
/* Contiguous case: deflate directly between the mbufs' data areas. */
364 stream.avail_in = op->src.length;
365 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
366 stream.avail_out = op->m_dst->data_len;
367 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
369 /* Stateless operation, all buffer will be compressed in one go */
370 zlib_flush = map_zlib_flush_flag(op->flush_flag);
371 ret = deflate(&stream, zlib_flush);
373 if (stream.avail_in != 0) {
374 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* With Z_FINISH, anything other than Z_STREAM_END means output was short. */
378 if (ret != Z_STREAM_END)
381 /* Copy data to destination SGL */
/* NOTE(review): the copy-back decision keys off m_src->nb_segs while
 * writing into m_dst — assumes src and dst are SGL together; confirm. */
382 if (op->m_src->nb_segs > 1) {
383 uint32_t remaining_data = stream.total_out;
384 uint8_t *src_data = single_dst_buf;
385 struct rte_mbuf *dst_buf = op->m_dst;
387 while (remaining_data > 0) {
388 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
/* Last (partial) segment: copy the tail and stop. */
391 if (remaining_data < dst_buf->data_len) {
392 memcpy(dst_data, src_data, remaining_data);
395 memcpy(dst_data, src_data, dst_buf->data_len);
396 remaining_data -= dst_buf->data_len;
397 src_data += dst_buf->data_len;
398 dst_buf = dst_buf->next;
403 op->consumed = stream.total_in;
404 op->produced = stream.total_out;
405 op->status = RTE_COMP_OP_STATUS_SUCCESS;
407 deflateReset(&stream);
/* Cleanup path: rte_free(NULL) is a no-op, so unconditional frees are safe. */
412 rte_free(single_src_buf);
413 rte_free(single_dst_buf);
/* Reference decompression path: run the op through zlib's inflate instead of
 * the PMD.  Mirrors compress_zlib(): SGL inputs are linearized, inflate runs
 * in one shot, and the output is copied back into the destination chain.
 * Fills op->consumed/produced/status on success. */
419 decompress_zlib(struct rte_comp_op *op,
420 const struct rte_comp_xform *xform)
425 int ret = TEST_FAILED;
426 uint8_t *single_src_buf = NULL;
427 uint8_t *single_dst_buf = NULL;
429 /* initialize zlib stream */
430 stream.zalloc = Z_NULL;
431 stream.zfree = Z_NULL;
432 stream.opaque = Z_NULL;
435 * Window bits is the base two logarithm of the window size (in bytes).
436 * When doing raw DEFLATE, this number will be negative.
438 window_bits = -(xform->decompress.window_size);
440 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate init path. */
443 printf("Zlib deflate could not be initialized\n");
447 /* Assuming stateless operation */
/* Multi-segment source: linearize into temporary contiguous buffers. */
449 if (op->m_src->nb_segs > 1) {
450 single_src_buf = rte_malloc(NULL,
451 rte_pktmbuf_pkt_len(op->m_src), 0);
452 if (single_src_buf == NULL) {
453 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
456 single_dst_buf = rte_malloc(NULL,
457 rte_pktmbuf_pkt_len(op->m_dst), 0);
458 if (single_dst_buf == NULL) {
459 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
462 if (rte_pktmbuf_read(op->m_src, 0,
463 rte_pktmbuf_pkt_len(op->m_src),
464 single_src_buf) == NULL) {
466 "Buffer could not be read entirely\n");
470 stream.avail_in = op->src.length;
471 stream.next_in = single_src_buf;
472 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
473 stream.next_out = single_dst_buf;
/* Contiguous case: inflate directly between the mbufs' data areas. */
476 stream.avail_in = op->src.length;
477 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
478 stream.avail_out = op->m_dst->data_len;
479 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
482 /* Stateless operation, all buffer will be compressed in one go */
483 zlib_flush = map_zlib_flush_flag(op->flush_flag);
484 ret = inflate(&stream, zlib_flush);
486 if (stream.avail_in != 0) {
487 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
491 if (ret != Z_STREAM_END)
/* Copy inflated data back into the destination SGL (same caveat as
 * compress_zlib: decision keys off m_src->nb_segs). */
494 if (op->m_src->nb_segs > 1) {
495 uint32_t remaining_data = stream.total_out;
496 uint8_t *src_data = single_dst_buf;
497 struct rte_mbuf *dst_buf = op->m_dst;
499 while (remaining_data > 0) {
500 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
503 if (remaining_data < dst_buf->data_len) {
504 memcpy(dst_data, src_data, remaining_data);
507 memcpy(dst_data, src_data, dst_buf->data_len);
508 remaining_data -= dst_buf->data_len;
509 src_data += dst_buf->data_len;
510 dst_buf = dst_buf->next;
515 op->consumed = stream.total_in;
516 op->produced = stream.total_out;
517 op->status = RTE_COMP_OP_STATUS_SUCCESS;
519 inflateReset(&stream);
/* Build a scatter-gather mbuf chain of total_data_size bytes on head_buf,
 * allocating SEG_SIZE segments from pool and chaining them.
 * If test_buf is non-NULL its bytes are copied across the segments
 * (deliberately without a NUL terminator — see comments below); if NULL the
 * segments are only sized, for use as a destination buffer. */
529 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
530 uint32_t total_data_size,
531 struct rte_mempool *pool)
533 uint32_t remaining_data = total_data_size;
/* Total segments needed; the head consumes one, the loop allocates the rest
 * (decrement of num_remaining_segs not visible in this extract — confirm). */
534 uint16_t num_remaining_segs =
535 DIV_CEIL(remaining_data, SEG_SIZE);
536 struct rte_mbuf *next_seg;
539 const char *data_ptr = test_buf;
544 * Allocate data in the first segment (header) and
545 * copy data if test buffer is provided
547 if (remaining_data < SEG_SIZE)
548 data_size = remaining_data;
550 data_size = SEG_SIZE;
551 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
552 if (buf_ptr == NULL) {
554 "Not enough space in the buffer\n");
558 if (data_ptr != NULL) {
559 /* Copy characters without NULL terminator */
/* strncpy is intentional here: exactly data_size bytes, no NUL needed
 * because total_data_size already accounts for the terminator. */
560 strncpy(buf_ptr, data_ptr, data_size);
561 data_ptr += data_size;
563 remaining_data -= data_size;
566 * Allocate the rest of the segments,
567 * copy the rest of the data and chain the segments.
569 for (i = 0; i < num_remaining_segs; i++) {
570 next_seg = rte_pktmbuf_alloc(pool);
571 if (next_seg == NULL) {
573 "New segment could not be allocated "
574 "from the mempool\n");
577 if (remaining_data < SEG_SIZE)
578 data_size = remaining_data;
580 data_size = SEG_SIZE;
581 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
582 if (buf_ptr == NULL) {
584 "Not enough space in the buffer\n");
/* Segment not yet chained, so it must be freed here explicitly. */
585 rte_pktmbuf_free(next_seg);
588 if (data_ptr != NULL) {
589 /* Copy characters without NULL terminator */
590 strncpy(buf_ptr, data_ptr, data_size);
591 data_ptr += data_size;
593 remaining_data -= data_size;
595 ret = rte_pktmbuf_chain(head_buf, next_seg);
597 rte_pktmbuf_free(next_seg);
/* NOTE(review): runtime string has a typo — "could not chained" should be
 * "could not be chained" (left untouched; fixing it changes log output). */
599 "Segment could not chained\n");
608 * Compresses and decompresses buffer with compressdev API and Zlib API
/* Core round-trip driver used by every stateless test case.
 *
 * Pipeline: build source/destination mbufs (flat or SGL) -> compress each
 * buffer (via zlib or the PMD, per zlib_dir) -> verify status, free sources
 * -> build decompress ops over the compressed output -> decompress (again
 * via zlib or the PMD) -> byte-compare against the original test strings.
 *
 * xforms are applied round-robin (i % num_xforms).  Op private data carries
 * the original buffer index because PMD dequeue order is not guaranteed.
 * Returns TEST_SUCCESS/TEST_FAILED (return lines not visible in extract);
 * the shared cleanup at the bottom frees everything still allocated. */
611 test_deflate_comp_decomp(const char * const test_bufs[],
612 unsigned int num_bufs,
614 struct rte_comp_xform *compress_xforms[],
615 struct rte_comp_xform *decompress_xforms[],
616 unsigned int num_xforms,
617 enum rte_comp_op_type state,
619 enum zlib_direction zlib_dir)
621 struct comp_testsuite_params *ts_params = &testsuite_params;
624 struct rte_mbuf *uncomp_bufs[num_bufs];
625 struct rte_mbuf *comp_bufs[num_bufs];
626 struct rte_comp_op *ops[num_bufs];
627 struct rte_comp_op *ops_processed[num_bufs];
628 void *priv_xforms[num_bufs];
629 uint16_t num_enqd, num_deqd, num_total_deqd;
630 uint16_t num_priv_xforms = 0;
631 unsigned int deqd_retries = 0;
632 struct priv_op_data *priv_data;
635 struct rte_mempool *buf_pool;
637 const struct rte_compressdev_capabilities *capa =
638 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
639 char *contig_buf = NULL;
641 /* Initialize all arrays to NULL */
/* NULL-init so the common cleanup path can free unconditionally. */
642 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
643 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
644 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
645 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
646 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* SGL tests draw from the small-segment pool, flat tests from the large. */
649 buf_pool = ts_params->small_mbuf_pool;
651 buf_pool = ts_params->large_mbuf_pool;
653 /* Prepare the source mbufs with the data */
654 ret = rte_pktmbuf_alloc_bulk(buf_pool,
655 uncomp_bufs, num_bufs);
658 "Source mbufs could not be allocated "
659 "from the mempool\n");
/* SGL branch: chain segments and copy the test string into them. */
664 for (i = 0; i < num_bufs; i++) {
665 data_size = strlen(test_bufs[i]) + 1;
666 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
/* Flat branch: single append + copy (incl. NUL, since size = strlen+1). */
672 for (i = 0; i < num_bufs; i++) {
673 data_size = strlen(test_bufs[i]) + 1;
674 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
/* NOTE(review): buf_ptr is not checked for NULL before snprintf — confirm
 * upstream; append can fail if the mbuf is too small. */
675 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
679 /* Prepare the destination mbufs */
680 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
683 "Destination mbufs could not be allocated "
684 "from the mempool\n");
/* Destination sized with 30% headroom for incompressible data. */
689 for (i = 0; i < num_bufs; i++) {
690 data_size = strlen(test_bufs[i]) *
691 COMPRESS_BUF_SIZE_RATIO;
692 if (prepare_sgl_bufs(NULL, comp_bufs[i],
699 for (i = 0; i < num_bufs; i++) {
700 data_size = strlen(test_bufs[i]) *
701 COMPRESS_BUF_SIZE_RATIO;
702 rte_pktmbuf_append(comp_bufs[i], data_size);
706 /* Build the compression operations */
707 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
710 "Compress operations could not be allocated "
711 "from the mempool\n");
715 for (i = 0; i < num_bufs; i++) {
716 ops[i]->m_src = uncomp_bufs[i];
717 ops[i]->m_dst = comp_bufs[i];
718 ops[i]->src.offset = 0;
719 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
720 ops[i]->dst.offset = 0;
721 if (state == RTE_COMP_OP_STATELESS) {
722 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
725 "Stateful operations are not supported "
726 "in these tests yet\n");
729 ops[i]->input_chksum = 0;
731 * Store original operation index in private data,
732 * since ordering does not have to be maintained,
733 * when dequeueing from compressdev, so a comparison
734 * at the end of the test can be done.
/* Private data lives immediately after the op struct (op pool was created
 * with sizeof(struct priv_op_data) of user area). */
736 priv_data = (struct priv_op_data *) (ops[i] + 1);
737 priv_data->orig_idx = i;
740 /* Compress data (either with Zlib API or compressdev API */
741 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
742 for (i = 0; i < num_bufs; i++) {
743 const struct rte_comp_xform *compress_xform =
744 compress_xforms[i % num_xforms];
745 ret = compress_zlib(ops[i], compress_xform,
/* zlib path preserves order, so processed == submitted index. */
750 ops_processed[i] = ops[i];
753 /* Create compress private xform data */
754 for (i = 0; i < num_xforms; i++) {
755 ret = rte_compressdev_private_xform_create(0,
756 (const struct rte_comp_xform *)compress_xforms[i],
760 "Compression private xform "
761 "could not be created\n");
/* Shareable xforms: one per distinct xform, reused round-robin across ops.
 * Non-shareable: one private xform per op. */
767 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
768 /* Attach shareable private xform data to ops */
769 for (i = 0; i < num_bufs; i++)
770 ops[i]->private_xform = priv_xforms[i % num_xforms];
772 /* Create rest of the private xforms for the other ops */
773 for (i = num_xforms; i < num_bufs; i++) {
774 ret = rte_compressdev_private_xform_create(0,
775 compress_xforms[i % num_xforms],
779 "Compression private xform "
780 "could not be created\n");
786 /* Attach non shareable private xform data to ops */
787 for (i = 0; i < num_bufs; i++)
788 ops[i]->private_xform = priv_xforms[i];
791 /* Enqueue and dequeue all operations */
792 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
793 if (num_enqd < num_bufs) {
795 "The operations could not be enqueued\n");
802 * If retrying a dequeue call, wait for 10 ms to allow
803 * enough time to the driver to process the operations
805 if (deqd_retries != 0) {
807 * Avoid infinite loop if not all the
808 * operations get out of the device
810 if (deqd_retries == MAX_DEQD_RETRIES) {
812 "Not all operations could be "
816 usleep(DEQUEUE_WAIT_TIME);
818 num_deqd = rte_compressdev_dequeue_burst(0, 0,
819 &ops_processed[num_total_deqd], num_bufs);
820 num_total_deqd += num_deqd;
822 } while (num_total_deqd < num_enqd);
826 /* Free compress private xforms */
827 for (i = 0; i < num_priv_xforms; i++) {
828 rte_compressdev_private_xform_free(0, priv_xforms[i]);
829 priv_xforms[i] = NULL;
/* Debug report of per-buffer compression results; orig_idx recovers the
 * submission order after out-of-order dequeue. */
834 for (i = 0; i < num_bufs; i++) {
835 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
836 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
837 const struct rte_comp_compress_xform *compress_xform =
838 &compress_xforms[xform_idx]->compress;
839 enum rte_comp_huffman huffman_type =
840 compress_xform->deflate.huffman;
841 RTE_LOG(DEBUG, USER1, "Buffer %u compressed from %u to %u bytes "
842 "(level = %d, huffman = %s)\n",
843 buf_idx[priv_data->orig_idx],
844 ops_processed[i]->consumed, ops_processed[i]->produced,
845 compress_xform->level,
846 huffman_type_strings[huffman_type]);
/* NOTE(review): format string lacks a trailing '\n' (and the value is a
 * percentage despite no '%%' in the text) — cosmetic log defect. */
847 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f",
848 (float)ops_processed[i]->produced /
849 ops_processed[i]->consumed * 100);
854 * Check operation status and free source mbufs (destination mbuf and
855 * compress operation information is needed for the decompression stage)
857 for (i = 0; i < num_bufs; i++) {
858 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
860 "Some operations were not successful\n");
863 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
864 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
/* Clear the slot so the final cleanup does not double-free. */
865 uncomp_bufs[priv_data->orig_idx] = NULL;
868 /* Allocate buffers for decompressed data */
869 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
872 "Destination mbufs could not be allocated "
873 "from the mempool\n");
878 for (i = 0; i < num_bufs; i++) {
879 priv_data = (struct priv_op_data *)
880 (ops_processed[i] + 1);
881 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
882 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
883 data_size, buf_pool) < 0)
888 for (i = 0; i < num_bufs; i++) {
889 priv_data = (struct priv_op_data *)
890 (ops_processed[i] + 1);
891 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
892 rte_pktmbuf_append(uncomp_bufs[i], data_size);
896 /* Build the decompression operations */
897 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
900 "Decompress operations could not be allocated "
901 "from the mempool\n");
905 /* Source buffer is the compressed data from the previous operations */
906 for (i = 0; i < num_bufs; i++) {
907 ops[i]->m_src = ops_processed[i]->m_dst;
908 ops[i]->m_dst = uncomp_bufs[i];
909 ops[i]->src.offset = 0;
911 * Set the length of the compressed data to the
912 * number of bytes that were produced in the previous stage
914 ops[i]->src.length = ops_processed[i]->produced;
915 ops[i]->dst.offset = 0;
916 if (state == RTE_COMP_OP_STATELESS) {
917 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
920 "Stateful operations are not supported "
921 "in these tests yet\n");
924 ops[i]->input_chksum = 0;
926 * Copy private data from previous operations,
927 * to keep the pointer to the original buffer
929 memcpy(ops[i] + 1, ops_processed[i] + 1,
930 sizeof(struct priv_op_data));
934 * Free the previous compress operations,
935 * as it is not needed anymore
937 for (i = 0; i < num_bufs; i++) {
938 rte_comp_op_free(ops_processed[i]);
939 ops_processed[i] = NULL;
942 /* Decompress data (either with Zlib API or compressdev API */
943 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
944 for (i = 0; i < num_bufs; i++) {
945 priv_data = (struct priv_op_data *)(ops[i] + 1);
946 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
947 const struct rte_comp_xform *decompress_xform =
948 decompress_xforms[xform_idx];
950 ret = decompress_zlib(ops[i], decompress_xform);
954 ops_processed[i] = ops[i];
957 /* Create decompress private xform data */
958 for (i = 0; i < num_xforms; i++) {
959 ret = rte_compressdev_private_xform_create(0,
960 (const struct rte_comp_xform *)decompress_xforms[i],
964 "Decompression private xform "
965 "could not be created\n");
971 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
972 /* Attach shareable private xform data to ops */
973 for (i = 0; i < num_bufs; i++) {
974 priv_data = (struct priv_op_data *)(ops[i] + 1);
975 uint16_t xform_idx = priv_data->orig_idx %
977 ops[i]->private_xform = priv_xforms[xform_idx];
980 /* Create rest of the private xforms for the other ops */
981 for (i = num_xforms; i < num_bufs; i++) {
982 ret = rte_compressdev_private_xform_create(0,
983 decompress_xforms[i % num_xforms],
987 "Decompression private xform "
988 "could not be created\n");
994 /* Attach non shareable private xform data to ops */
995 for (i = 0; i < num_bufs; i++) {
996 priv_data = (struct priv_op_data *)(ops[i] + 1);
997 uint16_t xform_idx = priv_data->orig_idx;
998 ops[i]->private_xform = priv_xforms[xform_idx];
1002 /* Enqueue and dequeue all operations */
1003 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1004 if (num_enqd < num_bufs) {
1006 "The operations could not be enqueued\n");
1013 * If retrying a dequeue call, wait for 10 ms to allow
1014 * enough time to the driver to process the operations
1016 if (deqd_retries != 0) {
1018 * Avoid infinite loop if not all the
1019 * operations get out of the device
1021 if (deqd_retries == MAX_DEQD_RETRIES) {
1023 "Not all operations could be "
1027 usleep(DEQUEUE_WAIT_TIME);
1029 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1030 &ops_processed[num_total_deqd], num_bufs);
1031 num_total_deqd += num_deqd;
1033 } while (num_total_deqd < num_enqd);
1038 for (i = 0; i < num_bufs; i++) {
1039 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1040 RTE_LOG(DEBUG, USER1, "Buffer %u decompressed from %u to %u bytes\n",
1041 buf_idx[priv_data->orig_idx],
1042 ops_processed[i]->consumed, ops_processed[i]->produced);
1047 * Check operation status and free source mbuf (destination mbuf and
1048 * compress operation information is still needed)
1050 for (i = 0; i < num_bufs; i++) {
1051 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1053 "Some operations were not successful\n");
1056 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1057 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1058 comp_bufs[priv_data->orig_idx] = NULL;
1062 * Compare the original stream with the decompressed stream
1063 * (in size and the data)
1065 for (i = 0; i < num_bufs; i++) {
1066 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1067 const char *buf1 = test_bufs[priv_data->orig_idx];
/* Linearize the (possibly SGL) decompressed output before comparing. */
1069 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1070 if (contig_buf == NULL) {
1071 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1076 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1077 ops_processed[i]->produced, contig_buf);
1079 if (compare_buffers(buf1, strlen(buf1) + 1,
1080 buf2, ops_processed[i]->produced) < 0)
1083 rte_free(contig_buf);
1090 /* Free resources */
/* Common exit: arrays were NULL-initialized and slots cleared as freed, so
 * each free here is a no-op for already-released entries. */
1091 for (i = 0; i < num_bufs; i++) {
1092 rte_pktmbuf_free(uncomp_bufs[i]);
1093 rte_pktmbuf_free(comp_bufs[i]);
1094 rte_comp_op_free(ops[i]);
1095 rte_comp_op_free(ops_processed[i]);
1097 for (i = 0; i < num_priv_xforms; i++) {
1098 if (priv_xforms[i] != NULL)
1099 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1101 rte_free(contig_buf);
/* Round-trip every test buffer with fixed Huffman coding, in both
 * directions (PMD compress + zlib decompress, then the reverse).
 * Skipped when the device does not advertise RTE_COMP_FF_HUFFMAN_FIXED. */
1107 test_compressdev_deflate_stateless_fixed(void)
1109 struct comp_testsuite_params *ts_params = &testsuite_params;
1110 const char *test_buffer;
1113 const struct rte_compressdev_capabilities *capab;
1115 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1116 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1118 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1121 struct rte_comp_xform *compress_xform =
1122 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1124 if (compress_xform == NULL) {
1126 "Compress xform could not be created\n");
/* Clone the suite default and only override the Huffman mode. */
1131 memcpy(compress_xform, ts_params->def_comp_xform,
1132 sizeof(struct rte_comp_xform));
1133 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1135 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1136 test_buffer = compress_test_bufs[i];
1138 /* Compress with compressdev, decompress with Zlib */
1139 if (test_deflate_comp_decomp(&test_buffer, 1,
1142 &ts_params->def_decomp_xform,
1144 RTE_COMP_OP_STATELESS,
1146 ZLIB_DECOMPRESS) < 0) {
1151 /* Compress with Zlib, decompress with compressdev */
1152 if (test_deflate_comp_decomp(&test_buffer, 1,
1155 &ts_params->def_decomp_xform,
1157 RTE_COMP_OP_STATELESS,
1159 ZLIB_COMPRESS) < 0) {
1168 rte_free(compress_xform);
/* Same as the fixed-Huffman test but with dynamic Huffman coding.
 * Skipped when the device does not advertise RTE_COMP_FF_HUFFMAN_DYNAMIC. */
1173 test_compressdev_deflate_stateless_dynamic(void)
1175 struct comp_testsuite_params *ts_params = &testsuite_params;
1176 const char *test_buffer;
/* NOTE(review): compress_xform is allocated BEFORE the capability check
 * below; if the feature is unsupported and the function returns early
 * (return line not visible), this allocation leaks.  Allocating after the
 * check (as the fixed-Huffman test does) would fix it — confirm upstream. */
1179 struct rte_comp_xform *compress_xform =
1180 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1182 const struct rte_compressdev_capabilities *capab;
1184 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1185 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1187 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1190 if (compress_xform == NULL) {
1192 "Compress xform could not be created\n");
1197 memcpy(compress_xform, ts_params->def_comp_xform,
1198 sizeof(struct rte_comp_xform));
1199 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1201 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1202 test_buffer = compress_test_bufs[i];
1204 /* Compress with compressdev, decompress with Zlib */
1205 if (test_deflate_comp_decomp(&test_buffer, 1,
1208 &ts_params->def_decomp_xform,
1210 RTE_COMP_OP_STATELESS,
1212 ZLIB_DECOMPRESS) < 0) {
1217 /* Compress with Zlib, decompress with compressdev */
1218 if (test_deflate_comp_decomp(&test_buffer, 1,
1221 &ts_params->def_decomp_xform,
1223 RTE_COMP_OP_STATELESS,
1225 ZLIB_COMPRESS) < 0) {
1234 rte_free(compress_xform);
/* Submit ALL test buffers in a single burst (one op each), using the suite
 * default xforms, in both zlib directions. */
1239 test_compressdev_deflate_stateless_multi_op(void)
1241 struct comp_testsuite_params *ts_params = &testsuite_params;
1242 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1243 uint16_t buf_idx[num_bufs];
/* Identity index map for the debug logs (assignment line not visible). */
1246 for (i = 0; i < num_bufs; i++)
1249 /* Compress with compressdev, decompress with Zlib */
1250 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1252 &ts_params->def_comp_xform,
1253 &ts_params->def_decomp_xform,
1255 RTE_COMP_OP_STATELESS,
1257 ZLIB_DECOMPRESS) < 0)
1260 /* Compress with Zlib, decompress with compressdev */
1261 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1263 &ts_params->def_comp_xform,
1264 &ts_params->def_decomp_xform,
1266 RTE_COMP_OP_STATELESS,
1271 return TEST_SUCCESS;
/* Sweep every compression level from RTE_COMP_LEVEL_MIN to _MAX for each
 * test buffer (PMD compress, zlib decompress). */
1275 test_compressdev_deflate_stateless_multi_level(void)
1277 struct comp_testsuite_params *ts_params = &testsuite_params;
1278 const char *test_buffer;
1282 struct rte_comp_xform *compress_xform =
1283 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1285 if (compress_xform == NULL) {
1287 "Compress xform could not be created\n");
/* Start from the suite default; only the level is varied per iteration. */
1292 memcpy(compress_xform, ts_params->def_comp_xform,
1293 sizeof(struct rte_comp_xform));
1295 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1296 test_buffer = compress_test_bufs[i];
1297 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1299 compress_xform->compress.level = level;
1300 /* Compress with compressdev, decompress with Zlib */
1301 if (test_deflate_comp_decomp(&test_buffer, 1,
1304 &ts_params->def_decomp_xform,
1306 RTE_COMP_OP_STATELESS,
1308 ZLIB_DECOMPRESS) < 0) {
1318 rte_free(compress_xform);
/* Number of distinct xforms (and ops) exercised by the multi-xform test. */
1322 #define NUM_XFORMS 3
/* Exercise several private xforms in one burst: NUM_XFORMS compress xforms
 * with differing levels, all applied to the same test buffer. */
1324 test_compressdev_deflate_stateless_multi_xform(void)
1326 struct comp_testsuite_params *ts_params = &testsuite_params;
1327 uint16_t num_bufs = NUM_XFORMS;
1328 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1329 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1330 const char *test_buffers[NUM_XFORMS];
1332 unsigned int level = RTE_COMP_LEVEL_MIN;
1333 uint16_t buf_idx[num_bufs];
1337 /* Create multiple xforms with various levels */
1338 for (i = 0; i < NUM_XFORMS; i++) {
1339 compress_xforms[i] = rte_malloc(NULL,
1340 sizeof(struct rte_comp_xform), 0);
1341 if (compress_xforms[i] == NULL) {
1343 "Compress xform could not be created\n");
1348 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1349 sizeof(struct rte_comp_xform));
1350 compress_xforms[i]->compress.level = level;
1353 decompress_xforms[i] = rte_malloc(NULL,
1354 sizeof(struct rte_comp_xform), 0);
1355 if (decompress_xforms[i] == NULL) {
1357 "Decompress xform could not be created\n");
1362 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1363 sizeof(struct rte_comp_xform));
1366 for (i = 0; i < NUM_XFORMS; i++) {
1368 /* Use the same buffer in all sessions */
1369 test_buffers[i] = compress_test_bufs[0];
1371 /* Compress with compressdev, decompress with Zlib */
1372 if (test_deflate_comp_decomp(test_buffers, num_bufs,
1377 RTE_COMP_OP_STATELESS,
1379 ZLIB_DECOMPRESS) < 0) {
/* Cleanup: arrays were NULL-initialized, so frees are safe on all paths. */
1386 for (i = 0; i < NUM_XFORMS; i++) {
1387 rte_free(compress_xforms[i]);
1388 rte_free(decompress_xforms[i]);
/* Scatter-gather round trip: same flow as the basic tests but with SGL
 * source/destination mbuf chains (see prepare_sgl_bufs()).
 * Skipped when the device lacks RTE_COMP_FF_OOP_SGL_IN_SGL_OUT. */
1395 test_compressdev_deflate_stateless_sgl(void)
1397 struct comp_testsuite_params *ts_params = &testsuite_params;
1399 const char *test_buffer;
1400 const struct rte_compressdev_capabilities *capab;
1402 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1403 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1405 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1408 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1409 test_buffer = compress_test_bufs[i];
1410 /* Compress with compressdev, decompress with Zlib */
1411 if (test_deflate_comp_decomp(&test_buffer, 1,
1413 &ts_params->def_comp_xform,
1414 &ts_params->def_decomp_xform,
1416 RTE_COMP_OP_STATELESS,
1418 ZLIB_DECOMPRESS) < 0)
1421 /* Compress with Zlib, decompress with compressdev */
1422 if (test_deflate_comp_decomp(&test_buffer, 1,
1424 &ts_params->def_comp_xform,
1425 &ts_params->def_decomp_xform,
1427 RTE_COMP_OP_STATELESS,
1433 return TEST_SUCCESS;
/* Test-suite descriptor consumed by unit_test_suite_runner().
 * Suite-level setup/teardown build the shared pools/xforms; each device
 * test additionally gets generic_ut_setup/teardown to (re)start device 0. */
1436 static struct unit_test_suite compressdev_testsuite = {
1437 .suite_name = "compressdev unit test suite",
1438 .setup = testsuite_setup,
1439 .teardown = testsuite_teardown,
1440 .unit_test_cases = {
1441 TEST_CASE_ST(NULL, NULL,
1442 test_compressdev_invalid_configuration),
1443 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1444 test_compressdev_deflate_stateless_fixed),
1445 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1446 test_compressdev_deflate_stateless_dynamic),
1447 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1448 test_compressdev_deflate_stateless_multi_op),
1449 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1450 test_compressdev_deflate_stateless_multi_level),
1451 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1452 test_compressdev_deflate_stateless_multi_xform),
1453 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1454 test_compressdev_deflate_stateless_sgl),
1455 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point: run the suite; registered below as "compressdev_autotest". */
1460 test_compressdev(void)
1462 return unit_test_suite_runner(&compressdev_testsuite);
1465 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);