1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
12 #include <rte_compressdev.h>
13 #include <rte_string_fns.h>
15 #include "test_compressdev_test_buffer.h"
18 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
20 #define DEFAULT_WINDOW_SIZE 15
21 #define DEFAULT_MEM_LEVEL 8
22 #define MAX_DEQD_RETRIES 10
23 #define DEQUEUE_WAIT_TIME 10000
26 * 30% extra size for compressed data compared to original data,
27 * in case data size cannot be reduced and it is actually bigger
28 * due to the compress block headers
30 #define COMPRESS_BUF_SIZE_RATIO 1.3
31 #define NUM_LARGE_MBUFS 16
32 #define SMALL_SEG_SIZE 256
35 #define NUM_MAX_XFORMS 16
36 #define NUM_MAX_INFLIGHT_OPS 128
/* Printable names for enum rte_comp_huffman, designated-initialized so the
 * index matches the enum value. NOTE(review): the storage-class/type line of
 * this declaration is not visible in this chunk. */
40 huffman_type_strings[] = {
41 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
42 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
43 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/* Suite-wide state shared by all test cases: mbuf/op mempools plus the
 * default compress and decompress transforms, created once in setup and
 * released in teardown. */
57 struct comp_testsuite_params {
58 struct rte_mempool *large_mbuf_pool; /* mbufs sized for any whole test buffer */
59 struct rte_mempool *small_mbuf_pool; /* small segments for SGL (chained mbuf) tests */
60 struct rte_mempool *op_pool; /* rte_comp_op pool with priv_op_data trailer */
61 struct rte_comp_xform *def_comp_xform; /* default DEFLATE compress xform */
62 struct rte_comp_xform *def_decomp_xform; /* default DEFLATE decompress xform */
/* Single global instance; zero-initialized so teardown is safe even if
 * setup fails part-way (free/NULL are no-ops). */
65 static struct comp_testsuite_params testsuite_params = { 0 };
/* Release every suite-level resource created by testsuite_setup().
 * rte_mempool_free() and rte_free() both accept NULL, so this is safe to
 * call after a partially failed setup. */
68 testsuite_teardown(void)
70 struct comp_testsuite_params *ts_params = &testsuite_params;
72 rte_mempool_free(ts_params->large_mbuf_pool);
73 rte_mempool_free(ts_params->small_mbuf_pool);
74 rte_mempool_free(ts_params->op_pool);
75 rte_free(ts_params->def_comp_xform);
76 rte_free(ts_params->def_decomp_xform);
/* Body of testsuite_setup(): requires at least one compressdev, sizes the
 * large mbuf pool from the biggest test buffer, creates the small-segment
 * pool for SGL tests, the op pool, and the default comp/decomp xforms.
 * NOTE(review): the function header and error-path lines (returns/gotos)
 * fall outside this chunk. */
82 struct comp_testsuite_params *ts_params = &testsuite_params;
83 uint32_t max_buf_size = 0;
86 if (rte_compressdev_count() == 0) {
87 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
91 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
92 rte_compressdev_name_get(0));
/* Find the largest test buffer (including its NUL terminator). */
94 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
95 max_buf_size = RTE_MAX(max_buf_size,
96 strlen(compress_test_bufs[i]) + 1);
99 * Buffers to be used in compression and decompression.
100 * Since decompressed data might be larger than
101 * compressed data (due to block header),
102 * buffers should be big enough for both cases.
/* 30% headroom (COMPRESS_BUF_SIZE_RATIO) for incompressible data. */
104 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
105 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
108 max_buf_size + RTE_PKTMBUF_HEADROOM,
110 if (ts_params->large_mbuf_pool == NULL) {
111 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
115 /* Create mempool with smaller buffers for SGL testing */
116 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
117 NUM_LARGE_MBUFS * MAX_SEGS,
119 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
121 if (ts_params->small_mbuf_pool == NULL) {
122 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* Op pool reserves sizeof(struct priv_op_data) of per-op private space,
 * used later to remember each op's original buffer index. */
126 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
127 0, sizeof(struct priv_op_data),
129 if (ts_params->op_pool == NULL) {
130 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
134 ts_params->def_comp_xform =
135 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
136 if (ts_params->def_comp_xform == NULL) {
138 "Default compress xform could not be created\n");
141 ts_params->def_decomp_xform =
142 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
143 if (ts_params->def_decomp_xform == NULL) {
145 "Default decompress xform could not be created\n");
149 /* Initializes default values for compress/decompress xforms */
150 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing comma below is the comma operator, not a
 * semicolon — harmless here, but likely a typo worth fixing. */
151 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
152 ts_params->def_comp_xform->compress.deflate.huffman =
153 RTE_COMP_HUFFMAN_DEFAULT;
154 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
155 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
156 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
158 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same comma-for-semicolon typo as above. */
159 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
160 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
161 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: undo any partial allocation before returning failure. */
166 testsuite_teardown();
/* Per-test setup: configure device 0 with a single queue pair, set it up
 * with NUM_MAX_INFLIGHT_OPS descriptors on the local socket, and start the
 * device. Error-path returns fall outside this chunk. */
172 generic_ut_setup(void)
174 /* Configure compressdev (one device, one queue pair) */
175 struct rte_compressdev_config config = {
176 .socket_id = rte_socket_id(),
178 .max_nb_priv_xforms = NUM_MAX_XFORMS,
182 if (rte_compressdev_configure(0, &config) < 0) {
183 RTE_LOG(ERR, USER1, "Device configuration failed\n");
187 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
188 rte_socket_id()) < 0) {
189 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
193 if (rte_compressdev_start(0) < 0) {
194 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stop then close device 0; close failure is only
 * logged since there is nothing further to do in a teardown. */
202 generic_ut_teardown(void)
204 rte_compressdev_stop(0);
205 if (rte_compressdev_close(0) < 0)
206 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/* Negative test: each deliberately invalid configuration must be rejected
 * by the API. TEST_ASSERT_FAIL fails the test if the call SUCCEEDS. */
210 test_compressdev_invalid_configuration(void)
212 struct rte_compressdev_config invalid_config;
213 struct rte_compressdev_config valid_config = {
214 .socket_id = rte_socket_id(),
216 .max_nb_priv_xforms = NUM_MAX_XFORMS,
219 struct rte_compressdev_info dev_info;
221 /* Invalid configuration with 0 queue pairs */
222 memcpy(&invalid_config, &valid_config,
223 sizeof(struct rte_compressdev_config));
224 invalid_config.nb_queue_pairs = 0;
226 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
227 "Device configuration was successful "
228 "with no queue pairs (invalid)\n");
231 * Invalid configuration with too many queue pairs
232 * (if there is an actual maximum number of queue pairs)
/* max_nb_queue_pairs == 0 means "no limit advertised", so skip then. */
234 rte_compressdev_info_get(0, &dev_info);
235 if (dev_info.max_nb_queue_pairs != 0) {
236 memcpy(&invalid_config, &valid_config,
237 sizeof(struct rte_compressdev_config));
238 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
240 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
241 "Device configuration was successful "
242 "with too many queue pairs (invalid)\n");
245 /* Invalid queue pair setup, with no number of queue pairs set */
246 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
247 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
248 "Queue pair setup was successful "
249 "with no queue pairs set (invalid)\n");
/* Compare two buffers for equal length and equal contents.
 * Callers treat a negative return as "different" (see the
 * `compare_buffers(...) < 0` usage later in this file); the return
 * statements themselves fall outside this chunk. */
255 compare_buffers(const char *buffer1, uint32_t buffer1_len,
256 const char *buffer2, uint32_t buffer2_len)
258 if (buffer1_len != buffer2_len) {
259 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
263 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
264 RTE_LOG(ERR, USER1, "Buffers are different\n");
272 * Maps compressdev and Zlib flush flags
/* Translate an rte_comp_flush_flag into the corresponding zlib flush
 * constant (the zlib return values sit in gapped lines of this chunk). */
275 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
278 case RTE_COMP_FLUSH_NONE:
280 case RTE_COMP_FLUSH_SYNC:
282 case RTE_COMP_FLUSH_FULL:
284 case RTE_COMP_FLUSH_FINAL:
287 * There should be only the values above,
288 * so this should never happen
/* Reference compression path: run the op through zlib's deflate directly
 * (no PMD), so the PMD's output can be validated against it.
 * Multi-segment (SGL) mbufs are first linearized into temporary flat
 * buffers because zlib works on contiguous memory. On success fills
 * op->consumed/produced/status like a real PMD would. */
296 compress_zlib(struct rte_comp_op *op,
297 const struct rte_comp_xform *xform, int mem_level)
301 int strategy, window_bits, comp_level;
302 int ret = TEST_FAILED;
303 uint8_t *single_src_buf = NULL;
304 uint8_t *single_dst_buf = NULL;
306 /* initialize zlib stream */
307 stream.zalloc = Z_NULL;
308 stream.zfree = Z_NULL;
309 stream.opaque = Z_NULL;
/* Fixed Huffman maps to Z_FIXED (in a gapped line); otherwise default. */
311 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
314 strategy = Z_DEFAULT_STRATEGY;
317 * Window bits is the base two logarithm of the window size (in bytes).
318 * When doing raw DEFLATE, this number will be negative.
320 window_bits = -(xform->compress.window_size);
322 comp_level = xform->compress.level;
324 if (comp_level != RTE_COMP_LEVEL_NONE)
325 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
326 window_bits, mem_level, strategy);
328 ret = deflateInit(&stream, Z_NO_COMPRESSION);
331 printf("Zlib deflate could not be initialized\n");
335 /* Assuming stateless operation */
/* SGL source: linearize src into single_src_buf, compress into
 * single_dst_buf, then scatter the result back into the dst chain. */
337 if (op->m_src->nb_segs > 1) {
338 single_src_buf = rte_malloc(NULL,
339 rte_pktmbuf_pkt_len(op->m_src), 0);
340 if (single_src_buf == NULL) {
341 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
344 single_dst_buf = rte_malloc(NULL,
345 rte_pktmbuf_pkt_len(op->m_dst), 0);
346 if (single_dst_buf == NULL) {
347 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
350 if (rte_pktmbuf_read(op->m_src, 0,
351 rte_pktmbuf_pkt_len(op->m_src),
352 single_src_buf) == NULL) {
354 "Buffer could not be read entirely\n");
358 stream.avail_in = op->src.length;
359 stream.next_in = single_src_buf;
360 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
361 stream.next_out = single_dst_buf;
/* Contiguous case: feed the mbuf data areas to zlib directly. */
364 stream.avail_in = op->src.length;
365 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
366 stream.avail_out = op->m_dst->data_len;
367 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
369 /* Stateless operation, all buffer will be compressed in one go */
370 zlib_flush = map_zlib_flush_flag(op->flush_flag);
371 ret = deflate(&stream, zlib_flush);
373 if (stream.avail_in != 0) {
374 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
/* Z_STREAM_END is expected for a one-shot FINAL-flush deflate. */
378 if (ret != Z_STREAM_END)
381 /* Copy data to destination SGL */
382 if (op->m_src->nb_segs > 1) {
383 uint32_t remaining_data = stream.total_out;
384 uint8_t *src_data = single_dst_buf;
385 struct rte_mbuf *dst_buf = op->m_dst;
387 while (remaining_data > 0) {
388 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
/* Last (partial) segment: copy the tail and stop. */
391 if (remaining_data < dst_buf->data_len) {
392 memcpy(dst_data, src_data, remaining_data);
395 memcpy(dst_data, src_data, dst_buf->data_len);
396 remaining_data -= dst_buf->data_len;
397 src_data += dst_buf->data_len;
398 dst_buf = dst_buf->next;
/* Mirror what a PMD reports back on the operation. */
403 op->consumed = stream.total_in;
404 op->produced = stream.total_out;
405 op->status = RTE_COMP_OP_STATUS_SUCCESS;
407 deflateReset(&stream);
/* Cleanup path: temporary flat buffers (rte_free(NULL) is a no-op). */
412 rte_free(single_src_buf);
413 rte_free(single_dst_buf);
/* Reference decompression path: run the op through zlib's inflate directly
 * (no PMD). Structure mirrors compress_zlib(): SGL inputs are linearized
 * into flat buffers, and for SGL outputs the inflated data is scattered
 * back into the destination mbuf chain. */
419 decompress_zlib(struct rte_comp_op *op,
420 const struct rte_comp_xform *xform)
425 int ret = TEST_FAILED;
426 uint8_t *single_src_buf = NULL;
427 uint8_t *single_dst_buf = NULL;
429 /* initialize zlib stream */
430 stream.zalloc = Z_NULL;
431 stream.zfree = Z_NULL;
432 stream.opaque = Z_NULL;
435 * Window bits is the base two logarithm of the window size (in bytes).
436 * When doing raw DEFLATE, this number will be negative.
438 window_bits = -(xform->decompress.window_size);
440 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate init —
 * copy-paste slip in the log text. */
443 printf("Zlib deflate could not be initialized\n");
447 /* Assuming stateless operation */
449 if (op->m_src->nb_segs > 1) {
450 single_src_buf = rte_malloc(NULL,
451 rte_pktmbuf_pkt_len(op->m_src), 0);
452 if (single_src_buf == NULL) {
453 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
456 single_dst_buf = rte_malloc(NULL,
457 rte_pktmbuf_pkt_len(op->m_dst), 0);
458 if (single_dst_buf == NULL) {
459 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
462 if (rte_pktmbuf_read(op->m_src, 0,
463 rte_pktmbuf_pkt_len(op->m_src),
464 single_src_buf) == NULL) {
466 "Buffer could not be read entirely\n");
470 stream.avail_in = op->src.length;
471 stream.next_in = single_src_buf;
472 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
473 stream.next_out = single_dst_buf;
476 stream.avail_in = op->src.length;
477 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
478 stream.avail_out = op->m_dst->data_len;
479 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
482 /* Stateless operation, all buffer will be compressed in one go */
483 zlib_flush = map_zlib_flush_flag(op->flush_flag);
484 ret = inflate(&stream, zlib_flush);
486 if (stream.avail_in != 0) {
487 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
491 if (ret != Z_STREAM_END)
/* Scatter the flat inflated output back into the destination SGL. */
494 if (op->m_src->nb_segs > 1) {
495 uint32_t remaining_data = stream.total_out;
496 uint8_t *src_data = single_dst_buf;
497 struct rte_mbuf *dst_buf = op->m_dst;
499 while (remaining_data > 0) {
500 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
503 if (remaining_data < dst_buf->data_len) {
504 memcpy(dst_data, src_data, remaining_data);
507 memcpy(dst_data, src_data, dst_buf->data_len);
508 remaining_data -= dst_buf->data_len;
509 src_data += dst_buf->data_len;
510 dst_buf = dst_buf->next;
/* Report results on the op, as a PMD would. */
515 op->consumed = stream.total_in;
516 op->produced = stream.total_out;
517 op->status = RTE_COMP_OP_STATUS_SUCCESS;
519 inflateReset(&stream);
/* Turn head_buf into a multi-segment (SGL) mbuf chain holding
 * total_data_size bytes, split into SMALL_SEG_SIZE segments; if test_buf is
 * non-NULL its contents are copied across the segments, otherwise only
 * space is reserved. limit_segs_in_sgl (0 = unlimited) caps the number of
 * segments; the final segment then comes from the large pool if it must
 * absorb the remainder. */
529 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
530 uint32_t total_data_size,
531 struct rte_mempool *small_mbuf_pool,
532 struct rte_mempool *large_mbuf_pool,
533 uint8_t limit_segs_in_sgl)
535 uint32_t remaining_data = total_data_size;
536 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
537 struct rte_mempool *pool;
538 struct rte_mbuf *next_seg;
541 const char *data_ptr = test_buf;
/* Clamp the segment count when the caller limits chain length;
 * "- 1" accounts for head_buf itself being one segment. */
545 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
546 num_remaining_segs = limit_segs_in_sgl - 1;
549 * Allocate data in the first segment (header) and
550 * copy data if test buffer is provided
552 if (remaining_data < SMALL_SEG_SIZE)
553 data_size = remaining_data;
555 data_size = SMALL_SEG_SIZE;
556 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
557 if (buf_ptr == NULL) {
559 "Not enough space in the 1st buffer\n");
563 if (data_ptr != NULL) {
564 /* Copy characters without NULL terminator */
/* strncpy is deliberate here: exactly data_size raw bytes, no
 * terminator wanted (mbuf data is not a C string). */
565 strncpy(buf_ptr, data_ptr, data_size);
566 data_ptr += data_size;
568 remaining_data -= data_size;
569 num_remaining_segs--;
572 * Allocate the rest of the segments,
573 * copy the rest of the data and chain the segments.
575 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment: may have to hold everything left over (when the
 * chain was clamped), so pick the large pool if it exceeds a
 * small segment. */
577 if (i == (num_remaining_segs - 1)) {
579 if (remaining_data > SMALL_SEG_SIZE)
580 pool = large_mbuf_pool;
582 pool = small_mbuf_pool;
583 data_size = remaining_data;
585 data_size = SMALL_SEG_SIZE;
586 pool = small_mbuf_pool;
589 next_seg = rte_pktmbuf_alloc(pool);
590 if (next_seg == NULL) {
592 "New segment could not be allocated "
593 "from the mempool\n");
596 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
597 if (buf_ptr == NULL) {
599 "Not enough space in the buffer\n");
600 rte_pktmbuf_free(next_seg);
603 if (data_ptr != NULL) {
604 /* Copy characters without NULL terminator */
605 strncpy(buf_ptr, data_ptr, data_size);
606 data_ptr += data_size;
608 remaining_data -= data_size;
/* Attach the new segment to the chain; free it ourselves on
 * failure since it is not yet owned by head_buf. */
610 ret = rte_pktmbuf_chain(head_buf, next_seg);
612 rte_pktmbuf_free(next_seg);
614 "Segment could not chained\n");
623 * Compresses and decompresses buffer with compressdev API and Zlib API
/* Core round-trip driver used by every deflate test case: compresses
 * num_bufs test buffers and decompresses them back, using either the PMD
 * (device 0, queue 0) or zlib directly per stage as selected by zlib_dir,
 * then byte-compares the result with the originals. Ops carry their
 * original buffer index in priv_op_data because dequeue order is not
 * guaranteed. */
626 test_deflate_comp_decomp(const char * const test_bufs[],
627 unsigned int num_bufs,
629 struct rte_comp_xform *compress_xforms[],
630 struct rte_comp_xform *decompress_xforms[],
631 unsigned int num_xforms,
632 enum rte_comp_op_type state,
634 enum zlib_direction zlib_dir)
636 struct comp_testsuite_params *ts_params = &testsuite_params;
639 struct rte_mbuf *uncomp_bufs[num_bufs];
640 struct rte_mbuf *comp_bufs[num_bufs];
641 struct rte_comp_op *ops[num_bufs];
642 struct rte_comp_op *ops_processed[num_bufs];
643 void *priv_xforms[num_bufs];
644 uint16_t num_enqd, num_deqd, num_total_deqd;
645 uint16_t num_priv_xforms = 0;
646 unsigned int deqd_retries = 0;
647 struct priv_op_data *priv_data;
650 struct rte_mempool *buf_pool;
652 const struct rte_compressdev_capabilities *capa =
653 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
654 char *contig_buf = NULL;
656 /* Initialize all arrays to NULL */
/* ...so the common cleanup path can free unconditionally. */
657 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
658 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
659 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
660 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
661 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* SGL tests draw head mbufs from the small pool, flat tests from the
 * large pool (selector condition sits in a gapped line). */
664 buf_pool = ts_params->small_mbuf_pool;
666 buf_pool = ts_params->large_mbuf_pool;
668 /* Prepare the source mbufs with the data */
669 ret = rte_pktmbuf_alloc_bulk(buf_pool,
670 uncomp_bufs, num_bufs);
673 "Source mbufs could not be allocated "
674 "from the mempool\n");
/* SGL branch: build a chained mbuf per test buffer. */
679 for (i = 0; i < num_bufs; i++) {
680 data_size = strlen(test_bufs[i]) + 1;
681 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
683 ts_params->small_mbuf_pool,
684 ts_params->large_mbuf_pool,
/* Flat branch: append and copy each buffer (incl. NUL) directly. */
689 for (i = 0; i < num_bufs; i++) {
690 data_size = strlen(test_bufs[i]) + 1;
691 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
/* NOTE(review): buf_ptr is not checked for NULL before use here,
 * unlike the SGL path — append failure would crash snprintf. */
692 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
696 /* Prepare the destination mbufs */
697 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
700 "Destination mbufs could not be allocated "
701 "from the mempool\n");
706 for (i = 0; i < num_bufs; i++) {
/* Destination sized with 30% slack for incompressible input. */
707 data_size = strlen(test_bufs[i]) *
708 COMPRESS_BUF_SIZE_RATIO;
709 if (prepare_sgl_bufs(NULL, comp_bufs[i],
711 ts_params->small_mbuf_pool,
712 ts_params->large_mbuf_pool,
718 for (i = 0; i < num_bufs; i++) {
719 data_size = strlen(test_bufs[i]) *
720 COMPRESS_BUF_SIZE_RATIO;
721 rte_pktmbuf_append(comp_bufs[i], data_size);
725 /* Build the compression operations */
726 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
729 "Compress operations could not be allocated "
730 "from the mempool\n");
734 for (i = 0; i < num_bufs; i++) {
735 ops[i]->m_src = uncomp_bufs[i];
736 ops[i]->m_dst = comp_bufs[i];
737 ops[i]->src.offset = 0;
738 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
739 ops[i]->dst.offset = 0;
740 if (state == RTE_COMP_OP_STATELESS) {
741 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
744 "Stateful operations are not supported "
745 "in these tests yet\n");
748 ops[i]->input_chksum = 0;
750 * Store original operation index in private data,
751 * since ordering does not have to be maintained,
752 * when dequeueing from compressdev, so a comparison
753 * at the end of the test can be done.
/* priv_op_data lives immediately after the op struct (reserved via
 * the op pool's private size). */
755 priv_data = (struct priv_op_data *) (ops[i] + 1);
756 priv_data->orig_idx = i;
759 /* Compress data (either with Zlib API or compressdev API */
760 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
761 for (i = 0; i < num_bufs; i++) {
762 const struct rte_comp_xform *compress_xform =
763 compress_xforms[i % num_xforms];
764 ret = compress_zlib(ops[i], compress_xform,
/* zlib path processes in place; ops double as processed ops. */
769 ops_processed[i] = ops[i];
772 /* Create compress private xform data */
773 for (i = 0; i < num_xforms; i++) {
774 ret = rte_compressdev_private_xform_create(0,
775 (const struct rte_comp_xform *)compress_xforms[i],
779 "Compression private xform "
780 "could not be created\n");
786 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
787 /* Attach shareable private xform data to ops */
788 for (i = 0; i < num_bufs; i++)
789 ops[i]->private_xform = priv_xforms[i % num_xforms];
791 /* Create rest of the private xforms for the other ops */
/* Non-shareable xforms: one private xform per op is required. */
792 for (i = num_xforms; i < num_bufs; i++) {
793 ret = rte_compressdev_private_xform_create(0,
794 compress_xforms[i % num_xforms],
798 "Compression private xform "
799 "could not be created\n");
805 /* Attach non shareable private xform data to ops */
806 for (i = 0; i < num_bufs; i++)
807 ops[i]->private_xform = priv_xforms[i];
810 /* Enqueue and dequeue all operations */
811 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
812 if (num_enqd < num_bufs) {
814 "The operations could not be enqueued\n");
821 * If retrying a dequeue call, wait for 10 ms to allow
822 * enough time to the driver to process the operations
824 if (deqd_retries != 0) {
826 * Avoid infinite loop if not all the
827 * operations get out of the device
829 if (deqd_retries == MAX_DEQD_RETRIES) {
831 "Not all operations could be "
835 usleep(DEQUEUE_WAIT_TIME);
837 num_deqd = rte_compressdev_dequeue_burst(0, 0,
838 &ops_processed[num_total_deqd], num_bufs);
839 num_total_deqd += num_deqd;
841 } while (num_total_deqd < num_enqd);
845 /* Free compress private xforms */
846 for (i = 0; i < num_priv_xforms; i++) {
847 rte_compressdev_private_xform_free(0, priv_xforms[i]);
848 priv_xforms[i] = NULL;
853 for (i = 0; i < num_bufs; i++) {
854 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
855 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
856 const struct rte_comp_compress_xform *compress_xform =
857 &compress_xforms[xform_idx]->compress;
858 enum rte_comp_huffman huffman_type =
859 compress_xform->deflate.huffman;
860 char engine[] = "zlib (directly, not PMD)";
/* NOTE(review): this condition is always true ("!= A || != B" holds
 * for every value) — almost certainly meant "&&", so the zlib label
 * is never kept. Fix candidate, left unchanged here. */
861 if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
862 strlcpy(engine, "PMD", sizeof(engine));
864 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
865 " %u bytes (level = %d, huffman = %s)\n",
866 buf_idx[priv_data->orig_idx], engine,
867 ops_processed[i]->consumed, ops_processed[i]->produced,
868 compress_xform->level,
869 huffman_type_strings[huffman_type]);
870 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
871 ops_processed[i]->consumed == 0 ? 0 :
872 (float)ops_processed[i]->produced /
873 ops_processed[i]->consumed * 100);
878 * Check operation status and free source mbufs (destination mbuf and
879 * compress operation information is needed for the decompression stage)
881 for (i = 0; i < num_bufs; i++) {
882 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
884 "Some operations were not successful\n");
887 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
888 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
889 uncomp_bufs[priv_data->orig_idx] = NULL;
892 /* Allocate buffers for decompressed data */
893 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
896 "Destination mbufs could not be allocated "
897 "from the mempool\n");
902 for (i = 0; i < num_bufs; i++) {
903 priv_data = (struct priv_op_data *)
904 (ops_processed[i] + 1);
905 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
906 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
908 ts_params->small_mbuf_pool,
909 ts_params->large_mbuf_pool,
915 for (i = 0; i < num_bufs; i++) {
916 priv_data = (struct priv_op_data *)
917 (ops_processed[i] + 1);
918 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
919 rte_pktmbuf_append(uncomp_bufs[i], data_size);
923 /* Build the decompression operations */
924 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
927 "Decompress operations could not be allocated "
928 "from the mempool\n");
932 /* Source buffer is the compressed data from the previous operations */
933 for (i = 0; i < num_bufs; i++) {
934 ops[i]->m_src = ops_processed[i]->m_dst;
935 ops[i]->m_dst = uncomp_bufs[i];
936 ops[i]->src.offset = 0;
938 * Set the length of the compressed data to the
939 * number of bytes that were produced in the previous stage
941 ops[i]->src.length = ops_processed[i]->produced;
942 ops[i]->dst.offset = 0;
943 if (state == RTE_COMP_OP_STATELESS) {
944 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
947 "Stateful operations are not supported "
948 "in these tests yet\n");
951 ops[i]->input_chksum = 0;
953 * Copy private data from previous operations,
954 * to keep the pointer to the original buffer
956 memcpy(ops[i] + 1, ops_processed[i] + 1,
957 sizeof(struct priv_op_data));
961 * Free the previous compress operations,
962 * as it is not needed anymore
964 for (i = 0; i < num_bufs; i++) {
965 rte_comp_op_free(ops_processed[i]);
966 ops_processed[i] = NULL;
969 /* Decompress data (either with Zlib API or compressdev API */
970 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
971 for (i = 0; i < num_bufs; i++) {
972 priv_data = (struct priv_op_data *)(ops[i] + 1);
973 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
974 const struct rte_comp_xform *decompress_xform =
975 decompress_xforms[xform_idx];
977 ret = decompress_zlib(ops[i], decompress_xform);
981 ops_processed[i] = ops[i];
984 /* Create decompress private xform data */
985 for (i = 0; i < num_xforms; i++) {
986 ret = rte_compressdev_private_xform_create(0,
987 (const struct rte_comp_xform *)decompress_xforms[i],
991 "Decompression private xform "
992 "could not be created\n");
998 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
999 /* Attach shareable private xform data to ops */
1000 for (i = 0; i < num_bufs; i++) {
1001 priv_data = (struct priv_op_data *)(ops[i] + 1);
1002 uint16_t xform_idx = priv_data->orig_idx %
1004 ops[i]->private_xform = priv_xforms[xform_idx];
1007 /* Create rest of the private xforms for the other ops */
1008 for (i = num_xforms; i < num_bufs; i++) {
1009 ret = rte_compressdev_private_xform_create(0,
1010 decompress_xforms[i % num_xforms],
1014 "Decompression private xform "
1015 "could not be created\n");
1021 /* Attach non shareable private xform data to ops */
1022 for (i = 0; i < num_bufs; i++) {
1023 priv_data = (struct priv_op_data *)(ops[i] + 1);
1024 uint16_t xform_idx = priv_data->orig_idx;
1025 ops[i]->private_xform = priv_xforms[xform_idx];
1029 /* Enqueue and dequeue all operations */
1030 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1031 if (num_enqd < num_bufs) {
1033 "The operations could not be enqueued\n");
1040 * If retrying a dequeue call, wait for 10 ms to allow
1041 * enough time to the driver to process the operations
1043 if (deqd_retries != 0) {
1045 * Avoid infinite loop if not all the
1046 * operations get out of the device
1048 if (deqd_retries == MAX_DEQD_RETRIES) {
1050 "Not all operations could be "
1054 usleep(DEQUEUE_WAIT_TIME);
1056 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1057 &ops_processed[num_total_deqd], num_bufs);
1058 num_total_deqd += num_deqd;
1060 } while (num_total_deqd < num_enqd);
1065 for (i = 0; i < num_bufs; i++) {
1066 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1067 char engine[] = "zlib, (directly, no PMD)";
/* NOTE(review): same always-true "!= A || != B" condition as the
 * compression stage — likely meant "&&". */
1068 if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1069 strlcpy(engine, "pmd", sizeof(engine));
1070 RTE_LOG(DEBUG, USER1,
1071 "Buffer %u decompressed by %s from %u to %u bytes\n",
1072 buf_idx[priv_data->orig_idx], engine,
1073 ops_processed[i]->consumed, ops_processed[i]->produced);
1078 * Check operation status and free source mbuf (destination mbuf and
1079 * compress operation information is still needed)
1081 for (i = 0; i < num_bufs; i++) {
1082 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1084 "Some operations were not successful\n");
1087 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1088 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1089 comp_bufs[priv_data->orig_idx] = NULL;
1093 * Compare the original stream with the decompressed stream
1094 * (in size and the data)
1096 for (i = 0; i < num_bufs; i++) {
1097 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1098 const char *buf1 = test_bufs[priv_data->orig_idx];
/* Linearize possibly-chained output so memcmp can run over it. */
1100 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1101 if (contig_buf == NULL) {
1102 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1107 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1108 ops_processed[i]->produced, contig_buf);
1110 if (compare_buffers(buf1, strlen(buf1) + 1,
1111 buf2, ops_processed[i]->produced) < 0)
1114 rte_free(contig_buf);
1121 /* Free resources */
/* Common exit path: everything was NULL-initialized / NULLed when
 * ownership moved, so unconditional frees are safe. */
1122 for (i = 0; i < num_bufs; i++) {
1123 rte_pktmbuf_free(uncomp_bufs[i]);
1124 rte_pktmbuf_free(comp_bufs[i]);
1125 rte_comp_op_free(ops[i]);
1126 rte_comp_op_free(ops_processed[i]);
1128 for (i = 0; i < num_priv_xforms; i++) {
1129 if (priv_xforms[i] != NULL)
1130 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1132 rte_free(contig_buf);
/* Round-trip each test buffer with fixed Huffman coding, in both
 * directions (PMD compress + zlib decompress, then the reverse).
 * Skipped when the device does not advertise RTE_COMP_FF_HUFFMAN_FIXED. */
1138 test_compressdev_deflate_stateless_fixed(void)
1140 struct comp_testsuite_params *ts_params = &testsuite_params;
1141 const char *test_buffer;
1144 const struct rte_compressdev_capabilities *capab;
1146 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1147 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1149 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1152 struct rte_comp_xform *compress_xform =
1153 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1155 if (compress_xform == NULL) {
1157 "Compress xform could not be created\n");
/* Clone the default xform, then force fixed Huffman. */
1162 memcpy(compress_xform, ts_params->def_comp_xform,
1163 sizeof(struct rte_comp_xform));
1164 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1166 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1167 test_buffer = compress_test_bufs[i];
1169 /* Compress with compressdev, decompress with Zlib */
1170 if (test_deflate_comp_decomp(&test_buffer, 1,
1173 &ts_params->def_decomp_xform,
1175 RTE_COMP_OP_STATELESS,
1177 ZLIB_DECOMPRESS) < 0) {
1182 /* Compress with Zlib, decompress with compressdev */
1183 if (test_deflate_comp_decomp(&test_buffer, 1,
1186 &ts_params->def_decomp_xform,
1188 RTE_COMP_OP_STATELESS,
1190 ZLIB_COMPRESS) < 0) {
/* Shared exit: xform freed on both success and failure paths. */
1199 rte_free(compress_xform);
/* Same round-trip as the fixed-Huffman test but with dynamic Huffman.
 * Skipped when RTE_COMP_FF_HUFFMAN_DYNAMIC is not supported.
 * NOTE(review): compress_xform is allocated BEFORE the capability check —
 * if the skip path (in a gapped line) returns without freeing it, the
 * allocation leaks on unsupported devices; verify against full source. */
1204 test_compressdev_deflate_stateless_dynamic(void)
1206 struct comp_testsuite_params *ts_params = &testsuite_params;
1207 const char *test_buffer;
1210 struct rte_comp_xform *compress_xform =
1211 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1213 const struct rte_compressdev_capabilities *capab;
1215 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1216 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1218 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1221 if (compress_xform == NULL) {
1223 "Compress xform could not be created\n");
1228 memcpy(compress_xform, ts_params->def_comp_xform,
1229 sizeof(struct rte_comp_xform));
1230 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1232 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1233 test_buffer = compress_test_bufs[i];
1235 /* Compress with compressdev, decompress with Zlib */
1236 if (test_deflate_comp_decomp(&test_buffer, 1,
1239 &ts_params->def_decomp_xform,
1241 RTE_COMP_OP_STATELESS,
1243 ZLIB_DECOMPRESS) < 0) {
1248 /* Compress with Zlib, decompress with compressdev */
1249 if (test_deflate_comp_decomp(&test_buffer, 1,
1252 &ts_params->def_decomp_xform,
1254 RTE_COMP_OP_STATELESS,
1256 ZLIB_COMPRESS) < 0) {
1265 rte_free(compress_xform);
/* Submit ALL test buffers in a single burst (one op each) using the
 * default xforms, in both PMD/zlib directions. */
1270 test_compressdev_deflate_stateless_multi_op(void)
1272 struct comp_testsuite_params *ts_params = &testsuite_params;
1273 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1274 uint16_t buf_idx[num_bufs];
/* Identity index map: buffer i is logged as buffer i. */
1277 for (i = 0; i < num_bufs; i++)
1280 /* Compress with compressdev, decompress with Zlib */
1281 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1283 &ts_params->def_comp_xform,
1284 &ts_params->def_decomp_xform,
1286 RTE_COMP_OP_STATELESS,
1288 ZLIB_DECOMPRESS) < 0)
1291 /* Compress with Zlib, decompress with compressdev */
1292 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1294 &ts_params->def_comp_xform,
1295 &ts_params->def_decomp_xform,
1297 RTE_COMP_OP_STATELESS,
1302 return TEST_SUCCESS;
/* Sweep every compression level from RTE_COMP_LEVEL_MIN to MAX for every
 * test buffer (PMD compress, zlib decompress only). */
1306 test_compressdev_deflate_stateless_multi_level(void)
1308 struct comp_testsuite_params *ts_params = &testsuite_params;
1309 const char *test_buffer;
1313 struct rte_comp_xform *compress_xform =
1314 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1316 if (compress_xform == NULL) {
1318 "Compress xform could not be created\n");
1323 memcpy(compress_xform, ts_params->def_comp_xform,
1324 sizeof(struct rte_comp_xform));
1326 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1327 test_buffer = compress_test_bufs[i];
1328 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1330 compress_xform->compress.level = level;
1331 /* Compress with compressdev, decompress with Zlib */
1332 if (test_deflate_comp_decomp(&test_buffer, 1,
1335 &ts_params->def_decomp_xform,
1337 RTE_COMP_OP_STATELESS,
1339 ZLIB_DECOMPRESS) < 0) {
1349 rte_free(compress_xform);
1353 #define NUM_XFORMS 3
/* Exercise multiple xforms in one burst: NUM_XFORMS compress/decompress
 * xform pairs at increasing levels, all applied to copies of the same
 * test buffer (PMD compress, zlib decompress). */
1355 test_compressdev_deflate_stateless_multi_xform(void)
1357 struct comp_testsuite_params *ts_params = &testsuite_params;
1358 uint16_t num_bufs = NUM_XFORMS;
1359 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1360 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1361 const char *test_buffers[NUM_XFORMS];
1363 unsigned int level = RTE_COMP_LEVEL_MIN;
1364 uint16_t buf_idx[num_bufs];
1368 /* Create multiple xforms with various levels */
1369 for (i = 0; i < NUM_XFORMS; i++) {
1370 compress_xforms[i] = rte_malloc(NULL,
1371 sizeof(struct rte_comp_xform), 0);
1372 if (compress_xforms[i] == NULL) {
1374 "Compress xform could not be created\n");
1379 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1380 sizeof(struct rte_comp_xform));
1381 compress_xforms[i]->compress.level = level;
1384 decompress_xforms[i] = rte_malloc(NULL,
1385 sizeof(struct rte_comp_xform), 0);
1386 if (decompress_xforms[i] == NULL) {
1388 "Decompress xform could not be created\n");
1393 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1394 sizeof(struct rte_comp_xform));
1397 for (i = 0; i < NUM_XFORMS; i++) {
1399 /* Use the same buffer in all sessions */
1400 test_buffers[i] = compress_test_bufs[0];
1402 /* Compress with compressdev, decompress with Zlib */
1403 if (test_deflate_comp_decomp(test_buffers, num_bufs,
1408 RTE_COMP_OP_STATELESS,
1410 ZLIB_DECOMPRESS) < 0) {
/* Shared exit: free all xforms (rte_free(NULL) is a no-op). */
1417 for (i = 0; i < NUM_XFORMS; i++) {
1418 rte_free(compress_xforms[i]);
1419 rte_free(decompress_xforms[i]);
/* Round-trip each test buffer using chained (scatter-gather) mbufs in both
 * directions. Skipped when the device does not advertise
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT. */
1426 test_compressdev_deflate_stateless_sgl(void)
1428 struct comp_testsuite_params *ts_params = &testsuite_params;
1430 const char *test_buffer;
1431 const struct rte_compressdev_capabilities *capab;
1433 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1434 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1436 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1439 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1440 test_buffer = compress_test_bufs[i];
1441 /* Compress with compressdev, decompress with Zlib */
1442 if (test_deflate_comp_decomp(&test_buffer, 1,
1444 &ts_params->def_comp_xform,
1445 &ts_params->def_decomp_xform,
1447 RTE_COMP_OP_STATELESS,
1449 ZLIB_DECOMPRESS) < 0)
1452 /* Compress with Zlib, decompress with compressdev */
1453 if (test_deflate_comp_decomp(&test_buffer, 1,
1455 &ts_params->def_comp_xform,
1456 &ts_params->def_decomp_xform,
1458 RTE_COMP_OP_STATELESS,
1464 return TEST_SUCCESS;
/* Test-suite descriptor: suite-level setup/teardown plus one entry per
 * test case, each wrapped with the generic device setup/teardown (except
 * the invalid-configuration test, which configures the device itself). */
1467 static struct unit_test_suite compressdev_testsuite = {
1468 .suite_name = "compressdev unit test suite",
1469 .setup = testsuite_setup,
1470 .teardown = testsuite_teardown,
1471 .unit_test_cases = {
1472 TEST_CASE_ST(NULL, NULL,
1473 test_compressdev_invalid_configuration),
1474 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1475 test_compressdev_deflate_stateless_fixed),
1476 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1477 test_compressdev_deflate_stateless_dynamic),
1478 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1479 test_compressdev_deflate_stateless_multi_op),
1480 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1481 test_compressdev_deflate_stateless_multi_level),
1482 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1483 test_compressdev_deflate_stateless_multi_xform),
1484 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1485 test_compressdev_deflate_stateless_sgl),
1486 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point invoked by the DPDK test app; registered below as the
 * "compressdev_autotest" command. */
1491 test_compressdev(void)
1493 return unit_test_suite_runner(&compressdev_testsuite);
1496 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);