1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
12 #include <rte_compressdev.h>
13 #include <rte_string_fns.h>
15 #include "test_compressdev_test_buffer.h"
18 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
20 #define DEFAULT_WINDOW_SIZE 15
21 #define DEFAULT_MEM_LEVEL 8
22 #define MAX_DEQD_RETRIES 10
23 #define DEQUEUE_WAIT_TIME 10000
26 * 30% extra size for compressed data compared to original data,
27 * in case data size cannot be reduced and it is actually bigger
28 * due to the compress block headers
30 #define COMPRESS_BUF_SIZE_RATIO 1.3
31 #define NUM_LARGE_MBUFS 16
32 #define SMALL_SEG_SIZE 256
35 #define NUM_MAX_XFORMS 16
36 #define NUM_MAX_INFLIGHT_OPS 128
/*
 * Human-readable names for the Huffman coding types, indexed directly
 * by the RTE_COMP_HUFFMAN_* enum values (used in debug log output).
 */
40 huffman_type_strings[] = {
41 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
42 [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
43 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
/*
 * Resources shared by every test case in this suite; created once in
 * testsuite_setup() and released in testsuite_teardown().
 */
57 struct comp_testsuite_params {
58 struct rte_mempool *large_mbuf_pool;
59 struct rte_mempool *small_mbuf_pool;
60 struct rte_mempool *op_pool;
61 struct rte_comp_xform *def_comp_xform;
62 struct rte_comp_xform *def_decomp_xform;
/* Single global instance; zero-initialized so teardown is safe to call early. */
65 static struct comp_testsuite_params testsuite_params = { 0 };
/*
 * Releases all suite-level resources (mbuf pools, op pool, default
 * xforms).  rte_mempool_free()/rte_free() accept NULL, so this is safe
 * even if testsuite_setup() failed partway through.
 */
68 testsuite_teardown(void)
70 struct comp_testsuite_params *ts_params = &testsuite_params;
72 rte_mempool_free(ts_params->large_mbuf_pool);
73 rte_mempool_free(ts_params->small_mbuf_pool);
74 rte_mempool_free(ts_params->op_pool);
75 rte_free(ts_params->def_comp_xform);
76 rte_free(ts_params->def_decomp_xform);
/*
 * Suite-level setup: verifies a compress device exists, sizes the large
 * mbuf pool from the biggest test buffer (scaled by
 * COMPRESS_BUF_SIZE_RATIO to allow for incompressible data), creates a
 * small-segment pool for SGL tests, the comp-op pool, and the default
 * compress/decompress DEFLATE xforms.  On any failure it calls
 * testsuite_teardown() to release whatever was already created.
 * NOTE(review): some error-path/return lines are not visible in this
 * excerpt.
 */
82 struct comp_testsuite_params *ts_params = &testsuite_params;
83 uint32_t max_buf_size = 0;
86 if (rte_compressdev_count() == 0) {
87 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
91 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
92 rte_compressdev_name_get(0));
/* Find the largest test buffer (incl. NUL terminator) to size the pool. */
94 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
95 max_buf_size = RTE_MAX(max_buf_size,
96 strlen(compress_test_bufs[i]) + 1);
99 * Buffers to be used in compression and decompression.
100 * Since decompressed data might be larger than
101 * compressed data (due to block header),
102 * buffers should be big enough for both cases.
104 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
105 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
108 max_buf_size + RTE_PKTMBUF_HEADROOM,
110 if (ts_params->large_mbuf_pool == NULL) {
111 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
115 /* Create mempool with smaller buffers for SGL testing */
116 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
117 NUM_LARGE_MBUFS * MAX_SEGS,
119 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
121 if (ts_params->small_mbuf_pool == NULL) {
122 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
/* Op pool reserves room after each op for struct priv_op_data. */
126 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
127 0, sizeof(struct priv_op_data),
129 if (ts_params->op_pool == NULL) {
130 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
134 ts_params->def_comp_xform =
135 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
136 if (ts_params->def_comp_xform == NULL) {
138 "Default compress xform could not be created\n");
141 ts_params->def_decomp_xform =
142 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
143 if (ts_params->def_decomp_xform == NULL) {
145 "Default decompress xform could not be created\n");
149 /* Initializes default values for compress/decompress xforms */
150 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
/* NOTE(review): trailing comma (comma operator) chains into the next
 * assignment — legal and equivalent to a semicolon here, but easy to
 * misread; consider changing to ';' upstream. */
151 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
152 ts_params->def_comp_xform->compress.deflate.huffman =
153 RTE_COMP_HUFFMAN_DEFAULT;
154 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
155 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
156 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
158 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
/* NOTE(review): same comma-operator pattern as the compress xform above. */
159 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
160 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
161 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
/* Error path: undo partial initialization. */
166 testsuite_teardown();
/*
 * Per-test setup: configures device 0 with one queue pair and the
 * suite's xform limits, sets up queue pair 0 with NUM_MAX_INFLIGHT_OPS
 * descriptors on the local socket, then starts the device.
 */
172 generic_ut_setup(void)
174 /* Configure compressdev (one device, one queue pair) */
175 struct rte_compressdev_config config = {
176 .socket_id = rte_socket_id(),
178 .max_nb_priv_xforms = NUM_MAX_XFORMS,
182 if (rte_compressdev_configure(0, &config) < 0) {
183 RTE_LOG(ERR, USER1, "Device configuration failed\n");
187 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
188 rte_socket_id()) < 0) {
189 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
193 if (rte_compressdev_start(0) < 0) {
194 RTE_LOG(ERR, USER1, "Device could not be started\n");
/* Per-test teardown: stops and closes device 0 (logs if close fails). */
202 generic_ut_teardown(void)
204 rte_compressdev_stop(0);
205 if (rte_compressdev_close(0) < 0)
206 RTE_LOG(ERR, USER1, "Device could not be closed\n");
/*
 * Negative test: every rte_compressdev_configure()/queue_pair_setup()
 * call below is expected to FAIL (TEST_ASSERT_FAIL).  Covers: zero
 * queue pairs, more queue pairs than the device maximum (when the
 * device advertises one), and a queue-pair setup on an unconfigured
 * device.
 */
210 test_compressdev_invalid_configuration(void)
212 struct rte_compressdev_config invalid_config;
213 struct rte_compressdev_config valid_config = {
214 .socket_id = rte_socket_id(),
216 .max_nb_priv_xforms = NUM_MAX_XFORMS,
219 struct rte_compressdev_info dev_info;
221 /* Invalid configuration with 0 queue pairs */
222 memcpy(&invalid_config, &valid_config,
223 sizeof(struct rte_compressdev_config));
224 invalid_config.nb_queue_pairs = 0;
226 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
227 "Device configuration was successful "
228 "with no queue pairs (invalid)\n");
231 * Invalid configuration with too many queue pairs
232 * (if there is an actual maximum number of queue pairs)
234 rte_compressdev_info_get(0, &dev_info);
235 if (dev_info.max_nb_queue_pairs != 0) {
236 memcpy(&invalid_config, &valid_config,
237 sizeof(struct rte_compressdev_config));
238 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
240 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
241 "Device configuration was successful "
242 "with too many queue pairs (invalid)\n");
245 /* Invalid queue pair setup, with no number of queue pairs set */
246 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
247 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
248 "Queue pair setup was successful "
249 "with no queue pairs set (invalid)\n");
255 compare_buffers(const char *buffer1, uint32_t buffer1_len,
256 const char *buffer2, uint32_t buffer2_len)
258 if (buffer1_len != buffer2_len) {
259 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
263 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
264 RTE_LOG(ERR, USER1, "Buffers are different\n");
/*
 * Maps compressdev and Zlib flush flags.
 *
 * @param flag compressdev flush flag
 * @return the matching Z_* zlib flush constant, or -1 for an unknown
 *         value.  The visible excerpt was missing the return values in
 *         each case arm; they are restored here per the zlib manual.
 */
static int
map_zlib_flush_flag(enum rte_comp_flush_flag flag)
{
	switch (flag) {
	case RTE_COMP_FLUSH_NONE:
		return Z_NO_FLUSH;
	case RTE_COMP_FLUSH_SYNC:
		return Z_SYNC_FLUSH;
	case RTE_COMP_FLUSH_FULL:
		return Z_FULL_FLUSH;
	case RTE_COMP_FLUSH_FINAL:
		return Z_FINISH;
	/*
	 * There should be only the values above,
	 * so this should never happen
	 */
	default:
		return -1;
	}
}
/*
 * Compresses op->m_src into op->m_dst using zlib directly (reference
 * path, no PMD).  Handles multi-segment (SGL) mbufs by linearizing the
 * source into a temporary buffer and scattering the output back across
 * the destination segments.  On success fills op->consumed,
 * op->produced and sets op->status to SUCCESS.
 * NOTE(review): some error-path/closing-brace lines are not visible in
 * this excerpt.
 */
296 compress_zlib(struct rte_comp_op *op,
297 const struct rte_comp_xform *xform, int mem_level)
301 int strategy, window_bits, comp_level;
302 int ret = TEST_FAILED;
303 uint8_t *single_src_buf = NULL;
304 uint8_t *single_dst_buf = NULL;
306 /* initialize zlib stream */
307 stream.zalloc = Z_NULL;
308 stream.zfree = Z_NULL;
309 stream.opaque = Z_NULL;
/* Fixed Huffman maps to a non-default zlib strategy; otherwise default. */
311 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
314 strategy = Z_DEFAULT_STRATEGY;
317 * Window bits is the base two logarithm of the window size (in bytes).
318 * When doing raw DEFLATE, this number will be negative.
320 window_bits = -(xform->compress.window_size);
322 comp_level = xform->compress.level;
324 if (comp_level != RTE_COMP_LEVEL_NONE)
325 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
326 window_bits, mem_level, strategy);
328 ret = deflateInit(&stream, Z_NO_COMPRESSION);
331 printf("Zlib deflate could not be initialized\n");
335 /* Assuming stateless operation */
/* SGL source: linearize into contiguous scratch buffers for zlib. */
337 if (op->m_src->nb_segs > 1) {
338 single_src_buf = rte_malloc(NULL,
339 rte_pktmbuf_pkt_len(op->m_src), 0);
340 if (single_src_buf == NULL) {
341 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
344 single_dst_buf = rte_malloc(NULL,
345 rte_pktmbuf_pkt_len(op->m_dst), 0);
346 if (single_dst_buf == NULL) {
347 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
350 if (rte_pktmbuf_read(op->m_src, 0,
351 rte_pktmbuf_pkt_len(op->m_src),
352 single_src_buf) == NULL) {
354 "Buffer could not be read entirely\n");
358 stream.avail_in = op->src.length;
359 stream.next_in = single_src_buf;
360 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
361 stream.next_out = single_dst_buf;
/* Contiguous case: point zlib straight at the mbuf data. */
364 stream.avail_in = op->src.length;
365 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
366 stream.avail_out = op->m_dst->data_len;
367 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
369 /* Stateless operation, all buffer will be compressed in one go */
370 zlib_flush = map_zlib_flush_flag(op->flush_flag);
371 ret = deflate(&stream, zlib_flush);
373 if (stream.avail_in != 0) {
374 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
378 if (ret != Z_STREAM_END)
381 /* Copy data to destination SGL */
382 if (op->m_src->nb_segs > 1) {
383 uint32_t remaining_data = stream.total_out;
384 uint8_t *src_data = single_dst_buf;
385 struct rte_mbuf *dst_buf = op->m_dst;
387 while (remaining_data > 0) {
388 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
/* Last (partial) segment vs. full-segment copy. */
391 if (remaining_data < dst_buf->data_len) {
392 memcpy(dst_data, src_data, remaining_data);
395 memcpy(dst_data, src_data, dst_buf->data_len);
396 remaining_data -= dst_buf->data_len;
397 src_data += dst_buf->data_len;
398 dst_buf = dst_buf->next;
/* Report results back through the op, mirroring PMD behavior. */
403 op->consumed = stream.total_in;
404 op->produced = stream.total_out;
405 op->status = RTE_COMP_OP_STATUS_SUCCESS;
407 deflateReset(&stream);
412 rte_free(single_src_buf);
413 rte_free(single_dst_buf);
/*
 * Decompresses op->m_src into op->m_dst using zlib inflate directly
 * (reference path, no PMD).  Mirrors compress_zlib(): SGL mbufs are
 * linearized in, output scattered back out.  On success fills
 * op->consumed/op->produced and sets op->status to SUCCESS.
 * NOTE(review): some error-path/closing-brace lines are not visible in
 * this excerpt.
 */
419 decompress_zlib(struct rte_comp_op *op,
420 const struct rte_comp_xform *xform)
425 int ret = TEST_FAILED;
426 uint8_t *single_src_buf = NULL;
427 uint8_t *single_dst_buf = NULL;
429 /* initialize zlib stream */
430 stream.zalloc = Z_NULL;
431 stream.zfree = Z_NULL;
432 stream.opaque = Z_NULL;
435 * Window bits is the base two logarithm of the window size (in bytes).
436 * When doing raw DEFLATE, this number will be negative.
438 window_bits = -(xform->decompress.window_size);
440 ret = inflateInit2(&stream, window_bits);
/* NOTE(review): message says "deflate" but this is the inflate path. */
443 printf("Zlib deflate could not be initialized\n");
447 /* Assuming stateless operation */
/* SGL source: linearize into contiguous scratch buffers for zlib. */
449 if (op->m_src->nb_segs > 1) {
450 single_src_buf = rte_malloc(NULL,
451 rte_pktmbuf_pkt_len(op->m_src), 0);
452 if (single_src_buf == NULL) {
453 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
456 single_dst_buf = rte_malloc(NULL,
457 rte_pktmbuf_pkt_len(op->m_dst), 0);
458 if (single_dst_buf == NULL) {
459 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
462 if (rte_pktmbuf_read(op->m_src, 0,
463 rte_pktmbuf_pkt_len(op->m_src),
464 single_src_buf) == NULL) {
466 "Buffer could not be read entirely\n");
470 stream.avail_in = op->src.length;
471 stream.next_in = single_src_buf;
472 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
473 stream.next_out = single_dst_buf;
/* Contiguous case: point zlib straight at the mbuf data. */
476 stream.avail_in = op->src.length;
477 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
478 stream.avail_out = op->m_dst->data_len;
479 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
482 /* Stateless operation, all buffer will be compressed in one go */
483 zlib_flush = map_zlib_flush_flag(op->flush_flag);
484 ret = inflate(&stream, zlib_flush);
486 if (stream.avail_in != 0) {
487 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
491 if (ret != Z_STREAM_END)
/* Scatter the linear output back over the destination SGL. */
494 if (op->m_src->nb_segs > 1) {
495 uint32_t remaining_data = stream.total_out;
496 uint8_t *src_data = single_dst_buf;
497 struct rte_mbuf *dst_buf = op->m_dst;
499 while (remaining_data > 0) {
500 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
503 if (remaining_data < dst_buf->data_len) {
504 memcpy(dst_data, src_data, remaining_data);
507 memcpy(dst_data, src_data, dst_buf->data_len);
508 remaining_data -= dst_buf->data_len;
509 src_data += dst_buf->data_len;
510 dst_buf = dst_buf->next;
/* Report results back through the op, mirroring PMD behavior. */
515 op->consumed = stream.total_in;
516 op->produced = stream.total_out;
517 op->status = RTE_COMP_OP_STATUS_SUCCESS;
519 inflateReset(&stream);
/*
 * Builds a scatter-gather mbuf chain of total_data_size bytes on
 * head_buf, filling SMALL_SEG_SIZE-sized segments from small_mbuf_pool
 * (the final oversize remainder, if any, comes from large_mbuf_pool).
 * When test_buf is non-NULL its bytes are copied into the chain;
 * when NULL the space is only reserved.  limit_segs_in_sgl caps the
 * number of segments (0 = no limit).
 * NOTE(review): error-path/return lines are not visible in this
 * excerpt.
 */
529 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
530 uint32_t total_data_size,
531 struct rte_mempool *small_mbuf_pool,
532 struct rte_mempool *large_mbuf_pool,
533 uint8_t limit_segs_in_sgl)
535 uint32_t remaining_data = total_data_size;
536 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
537 struct rte_mempool *pool;
538 struct rte_mbuf *next_seg;
541 const char *data_ptr = test_buf;
/* Cap segment count; "- 1" accounts for the head segment below. */
545 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
546 num_remaining_segs = limit_segs_in_sgl - 1;
549 * Allocate data in the first segment (header) and
550 * copy data if test buffer is provided
552 if (remaining_data < SMALL_SEG_SIZE)
553 data_size = remaining_data;
555 data_size = SMALL_SEG_SIZE;
556 buf_ptr = rte_pktmbuf_append(head_buf, data_size);
557 if (buf_ptr == NULL) {
559 "Not enough space in the 1st buffer\n");
563 if (data_ptr != NULL) {
564 /* Copy characters without NULL terminator */
565 strncpy(buf_ptr, data_ptr, data_size);
566 data_ptr += data_size;
568 remaining_data -= data_size;
569 num_remaining_segs--;
572 * Allocate the rest of the segments,
573 * copy the rest of the data and chain the segments.
575 for (i = 0; i < num_remaining_segs; i++) {
/* Last segment: may be oversized when the chain was capped, so it
 * may need the large pool to hold all the remaining data. */
577 if (i == (num_remaining_segs - 1)) {
579 if (remaining_data > SMALL_SEG_SIZE)
580 pool = large_mbuf_pool;
582 pool = small_mbuf_pool;
583 data_size = remaining_data;
585 data_size = SMALL_SEG_SIZE;
586 pool = small_mbuf_pool;
589 next_seg = rte_pktmbuf_alloc(pool);
590 if (next_seg == NULL) {
592 "New segment could not be allocated "
593 "from the mempool\n");
596 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
597 if (buf_ptr == NULL) {
599 "Not enough space in the buffer\n");
600 rte_pktmbuf_free(next_seg);
603 if (data_ptr != NULL) {
604 /* Copy characters without NULL terminator */
605 strncpy(buf_ptr, data_ptr, data_size);
606 data_ptr += data_size;
608 remaining_data -= data_size;
610 ret = rte_pktmbuf_chain(head_buf, next_seg);
612 rte_pktmbuf_free(next_seg);
/* NOTE(review): log string has a typo ("could not chained") — runtime
 * string left unchanged here; fix upstream as "could not be chained". */
614 "Segment could not chained\n");
623 * Compresses and decompresses buffer with compressdev API and Zlib API
/*
 * Core round-trip helper used by every stateless test case.
 *
 * Compresses num_bufs test buffers and decompresses them back, where
 * each direction runs either through the compressdev PMD (device 0,
 * queue pair 0) or directly through zlib, selected by zlib_dir.  The
 * original buffer index is carried in per-op private data because the
 * PMD may dequeue out of order.  Finally the decompressed output is
 * compared byte-for-byte against the original input.
 *
 * NOTE(review): this excerpt omits a number of lines of the original
 * function (some declarations such as the 'engine' buffer and 'buf2',
 * goto labels, and error-path statements); comments below describe
 * only the visible logic.
 */
626 test_deflate_comp_decomp(const char * const test_bufs[],
627 unsigned int num_bufs,
629 struct rte_comp_xform *compress_xforms[],
630 struct rte_comp_xform *decompress_xforms[],
631 unsigned int num_xforms,
632 enum rte_comp_op_type state,
634 enum zlib_direction zlib_dir)
636 struct comp_testsuite_params *ts_params = &testsuite_params;
639 struct rte_mbuf *uncomp_bufs[num_bufs];
640 struct rte_mbuf *comp_bufs[num_bufs];
641 struct rte_comp_op *ops[num_bufs];
642 struct rte_comp_op *ops_processed[num_bufs];
643 void *priv_xforms[num_bufs];
644 uint16_t num_enqd, num_deqd, num_total_deqd;
645 uint16_t num_priv_xforms = 0;
646 unsigned int deqd_retries = 0;
647 struct priv_op_data *priv_data;
650 struct rte_mempool *buf_pool;
652 const struct rte_compressdev_capabilities *capa =
653 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
654 char *contig_buf = NULL;
656 /* Initialize all arrays to NULL */
657 memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
658 memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
659 memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
660 memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
661 memset(priv_xforms, 0, sizeof(void *) * num_bufs);
/* Pool choice depends on whether this is an SGL run (small segments). */
664 buf_pool = ts_params->small_mbuf_pool;
666 buf_pool = ts_params->large_mbuf_pool;
668 /* Prepare the source mbufs with the data */
669 ret = rte_pktmbuf_alloc_bulk(buf_pool,
670 uncomp_bufs, num_bufs);
673 "Source mbufs could not be allocated "
674 "from the mempool\n");
/* SGL path: chain segments and copy test data in. */
679 for (i = 0; i < num_bufs; i++) {
680 data_size = strlen(test_bufs[i]) + 1;
681 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
683 ts_params->small_mbuf_pool,
684 ts_params->large_mbuf_pool,
/* Contiguous path: single append + copy (incl. NUL terminator). */
689 for (i = 0; i < num_bufs; i++) {
690 data_size = strlen(test_bufs[i]) + 1;
691 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
692 snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
696 /* Prepare the destination mbufs */
697 ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
700 "Destination mbufs could not be allocated "
701 "from the mempool\n");
/* Destination sized up by COMPRESS_BUF_SIZE_RATIO for incompressible data. */
706 for (i = 0; i < num_bufs; i++) {
707 data_size = strlen(test_bufs[i]) *
708 COMPRESS_BUF_SIZE_RATIO;
709 if (prepare_sgl_bufs(NULL, comp_bufs[i],
711 ts_params->small_mbuf_pool,
712 ts_params->large_mbuf_pool,
718 for (i = 0; i < num_bufs; i++) {
719 data_size = strlen(test_bufs[i]) *
720 COMPRESS_BUF_SIZE_RATIO;
721 rte_pktmbuf_append(comp_bufs[i], data_size);
725 /* Build the compression operations */
726 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
729 "Compress operations could not be allocated "
730 "from the mempool\n");
734 for (i = 0; i < num_bufs; i++) {
735 ops[i]->m_src = uncomp_bufs[i];
736 ops[i]->m_dst = comp_bufs[i];
737 ops[i]->src.offset = 0;
738 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
739 ops[i]->dst.offset = 0;
740 if (state == RTE_COMP_OP_STATELESS) {
741 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
744 "Stateful operations are not supported "
745 "in these tests yet\n");
748 ops[i]->input_chksum = 0;
750 * Store original operation index in private data,
751 * since ordering does not have to be maintained,
752 * when dequeueing from compressdev, so a comparison
753 * at the end of the test can be done.
755 priv_data = (struct priv_op_data *) (ops[i] + 1);
756 priv_data->orig_idx = i;
759 /* Compress data (either with Zlib API or compressdev API */
760 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
761 for (i = 0; i < num_bufs; i++) {
762 const struct rte_comp_xform *compress_xform =
763 compress_xforms[i % num_xforms];
764 ret = compress_zlib(ops[i], compress_xform,
/* Zlib path processes in place: processed op == submitted op. */
769 ops_processed[i] = ops[i];
772 /* Create compress private xform data */
773 for (i = 0; i < num_xforms; i++) {
774 ret = rte_compressdev_private_xform_create(0,
775 (const struct rte_comp_xform *)compress_xforms[i],
779 "Compression private xform "
780 "could not be created\n");
786 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
787 /* Attach shareable private xform data to ops */
788 for (i = 0; i < num_bufs; i++)
789 ops[i]->private_xform = priv_xforms[i % num_xforms];
791 /* Create rest of the private xforms for the other ops */
792 for (i = num_xforms; i < num_bufs; i++) {
793 ret = rte_compressdev_private_xform_create(0,
794 compress_xforms[i % num_xforms],
798 "Compression private xform "
799 "could not be created\n");
805 /* Attach non shareable private xform data to ops */
806 for (i = 0; i < num_bufs; i++)
807 ops[i]->private_xform = priv_xforms[i];
810 /* Enqueue and dequeue all operations */
811 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
812 if (num_enqd < num_bufs) {
814 "The operations could not be enqueued\n");
821 * If retrying a dequeue call, wait for 10 ms to allow
822 * enough time to the driver to process the operations
824 if (deqd_retries != 0) {
826 * Avoid infinite loop if not all the
827 * operations get out of the device
829 if (deqd_retries == MAX_DEQD_RETRIES) {
831 "Not all operations could be "
835 usleep(DEQUEUE_WAIT_TIME);
837 num_deqd = rte_compressdev_dequeue_burst(0, 0,
838 &ops_processed[num_total_deqd], num_bufs);
839 num_total_deqd += num_deqd;
841 } while (num_total_deqd < num_enqd);
845 /* Free compress private xforms */
846 for (i = 0; i < num_priv_xforms; i++) {
847 rte_compressdev_private_xform_free(0, priv_xforms[i]);
848 priv_xforms[i] = NULL;
/* Log per-buffer compression results (debug level only). */
853 for (i = 0; i < num_bufs; i++) {
854 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
855 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
856 const struct rte_comp_compress_xform *compress_xform =
857 &compress_xforms[xform_idx]->compress;
858 enum rte_comp_huffman huffman_type =
859 compress_xform->deflate.huffman;
861 if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL)
862 strlcpy(engine, "zlib (direct, no pmd)", 22);
864 strlcpy(engine, "pmd", 22);
866 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
867 " %u bytes (level = %d, huffman = %s)\n",
868 buf_idx[priv_data->orig_idx], engine,
869 ops_processed[i]->consumed, ops_processed[i]->produced,
870 compress_xform->level,
871 huffman_type_strings[huffman_type]);
872 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
873 ops_processed[i]->consumed == 0 ? 0 :
874 (float)ops_processed[i]->produced /
875 ops_processed[i]->consumed * 100);
880 * Check operation status and free source mbufs (destination mbuf and
881 * compress operation information is needed for the decompression stage)
883 for (i = 0; i < num_bufs; i++) {
884 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
886 "Some operations were not successful\n");
889 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
890 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
891 uncomp_bufs[priv_data->orig_idx] = NULL;
894 /* Allocate buffers for decompressed data */
895 ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
898 "Destination mbufs could not be allocated "
899 "from the mempool\n");
904 for (i = 0; i < num_bufs; i++) {
905 priv_data = (struct priv_op_data *)
906 (ops_processed[i] + 1);
907 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
908 if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
910 ts_params->small_mbuf_pool,
911 ts_params->large_mbuf_pool,
917 for (i = 0; i < num_bufs; i++) {
918 priv_data = (struct priv_op_data *)
919 (ops_processed[i] + 1);
920 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
921 rte_pktmbuf_append(uncomp_bufs[i], data_size);
925 /* Build the decompression operations */
926 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
929 "Decompress operations could not be allocated "
930 "from the mempool\n");
934 /* Source buffer is the compressed data from the previous operations */
935 for (i = 0; i < num_bufs; i++) {
936 ops[i]->m_src = ops_processed[i]->m_dst;
937 ops[i]->m_dst = uncomp_bufs[i];
938 ops[i]->src.offset = 0;
940 * Set the length of the compressed data to the
941 * number of bytes that were produced in the previous stage
943 ops[i]->src.length = ops_processed[i]->produced;
944 ops[i]->dst.offset = 0;
945 if (state == RTE_COMP_OP_STATELESS) {
946 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
949 "Stateful operations are not supported "
950 "in these tests yet\n");
953 ops[i]->input_chksum = 0;
955 * Copy private data from previous operations,
956 * to keep the pointer to the original buffer
958 memcpy(ops[i] + 1, ops_processed[i] + 1,
959 sizeof(struct priv_op_data));
963 * Free the previous compress operations,
964 * as it is not needed anymore
966 for (i = 0; i < num_bufs; i++) {
967 rte_comp_op_free(ops_processed[i]);
968 ops_processed[i] = NULL;
971 /* Decompress data (either with Zlib API or compressdev API */
972 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
973 for (i = 0; i < num_bufs; i++) {
974 priv_data = (struct priv_op_data *)(ops[i] + 1);
975 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
976 const struct rte_comp_xform *decompress_xform =
977 decompress_xforms[xform_idx];
979 ret = decompress_zlib(ops[i], decompress_xform);
983 ops_processed[i] = ops[i];
986 /* Create decompress private xform data */
987 for (i = 0; i < num_xforms; i++) {
988 ret = rte_compressdev_private_xform_create(0,
989 (const struct rte_comp_xform *)decompress_xforms[i],
993 "Decompression private xform "
994 "could not be created\n");
1000 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1001 /* Attach shareable private xform data to ops */
1002 for (i = 0; i < num_bufs; i++) {
1003 priv_data = (struct priv_op_data *)(ops[i] + 1);
1004 uint16_t xform_idx = priv_data->orig_idx %
1006 ops[i]->private_xform = priv_xforms[xform_idx];
1009 /* Create rest of the private xforms for the other ops */
1010 for (i = num_xforms; i < num_bufs; i++) {
1011 ret = rte_compressdev_private_xform_create(0,
1012 decompress_xforms[i % num_xforms],
1016 "Decompression private xform "
1017 "could not be created\n");
1023 /* Attach non shareable private xform data to ops */
1024 for (i = 0; i < num_bufs; i++) {
1025 priv_data = (struct priv_op_data *)(ops[i] + 1);
1026 uint16_t xform_idx = priv_data->orig_idx;
1027 ops[i]->private_xform = priv_xforms[xform_idx];
1031 /* Enqueue and dequeue all operations */
1032 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1033 if (num_enqd < num_bufs) {
1035 "The operations could not be enqueued\n");
1042 * If retrying a dequeue call, wait for 10 ms to allow
1043 * enough time to the driver to process the operations
1045 if (deqd_retries != 0) {
1047 * Avoid infinite loop if not all the
1048 * operations get out of the device
1050 if (deqd_retries == MAX_DEQD_RETRIES) {
1052 "Not all operations could be "
1056 usleep(DEQUEUE_WAIT_TIME);
1058 num_deqd = rte_compressdev_dequeue_burst(0, 0,
1059 &ops_processed[num_total_deqd], num_bufs);
1060 num_total_deqd += num_deqd;
1062 } while (num_total_deqd < num_enqd);
/* Log per-buffer decompression results (debug level only). */
1067 for (i = 0; i < num_bufs; i++) {
1068 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1070 if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL)
1071 strlcpy(engine, "zlib (direct, no pmd)", 22);
1073 strlcpy(engine, "pmd", 22);
1074 RTE_LOG(DEBUG, USER1,
1075 "Buffer %u decompressed by %s from %u to %u bytes\n",
1076 buf_idx[priv_data->orig_idx], engine,
1077 ops_processed[i]->consumed, ops_processed[i]->produced);
1082 * Check operation status and free source mbuf (destination mbuf and
1083 * compress operation information is still needed)
1085 for (i = 0; i < num_bufs; i++) {
1086 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1088 "Some operations were not successful\n");
1091 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1092 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1093 comp_bufs[priv_data->orig_idx] = NULL;
1097 * Compare the original stream with the decompressed stream
1098 * (in size and the data)
1100 for (i = 0; i < num_bufs; i++) {
1101 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1102 const char *buf1 = test_bufs[priv_data->orig_idx];
1104 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1105 if (contig_buf == NULL) {
1106 RTE_LOG(ERR, USER1, "Contiguous buffer could not "
/* Linearize possibly-SGL output before the byte comparison. */
1111 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1112 ops_processed[i]->produced, contig_buf);
1114 if (compare_buffers(buf1, strlen(buf1) + 1,
1115 buf2, ops_processed[i]->produced) < 0)
1118 rte_free(contig_buf);
1125 /* Free resources */
1126 for (i = 0; i < num_bufs; i++) {
1127 rte_pktmbuf_free(uncomp_bufs[i]);
1128 rte_pktmbuf_free(comp_bufs[i]);
1129 rte_comp_op_free(ops[i]);
1130 rte_comp_op_free(ops_processed[i]);
1132 for (i = 0; i < num_priv_xforms; i++) {
1133 if (priv_xforms[i] != NULL)
1134 rte_compressdev_private_xform_free(0, priv_xforms[i]);
1136 rte_free(contig_buf);
/*
 * Round-trips every test buffer with fixed Huffman coding, in both
 * directions (PMD compress + zlib decompress, then zlib compress + PMD
 * decompress).  Skipped when the device does not advertise
 * RTE_COMP_FF_HUFFMAN_FIXED.
 */
1142 test_compressdev_deflate_stateless_fixed(void)
1144 struct comp_testsuite_params *ts_params = &testsuite_params;
1145 const char *test_buffer;
1148 const struct rte_compressdev_capabilities *capab;
1150 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1151 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1153 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1156 struct rte_comp_xform *compress_xform =
1157 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1159 if (compress_xform == NULL) {
1161 "Compress xform could not be created\n");
/* Clone the default compress xform, then force fixed Huffman. */
1166 memcpy(compress_xform, ts_params->def_comp_xform,
1167 sizeof(struct rte_comp_xform));
1168 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1170 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1171 test_buffer = compress_test_bufs[i];
1173 /* Compress with compressdev, decompress with Zlib */
1174 if (test_deflate_comp_decomp(&test_buffer, 1,
1177 &ts_params->def_decomp_xform,
1179 RTE_COMP_OP_STATELESS,
1181 ZLIB_DECOMPRESS) < 0) {
1186 /* Compress with Zlib, decompress with compressdev */
1187 if (test_deflate_comp_decomp(&test_buffer, 1,
1190 &ts_params->def_decomp_xform,
1192 RTE_COMP_OP_STATELESS,
1194 ZLIB_COMPRESS) < 0) {
1203 rte_free(compress_xform);
/*
 * Round-trips every test buffer with dynamic Huffman coding, in both
 * directions (PMD<->zlib).  Skipped when the device does not advertise
 * RTE_COMP_FF_HUFFMAN_DYNAMIC.
 */
1208 test_compressdev_deflate_stateless_dynamic(void)
1210 struct comp_testsuite_params *ts_params = &testsuite_params;
1211 const char *test_buffer;
1214 struct rte_comp_xform *compress_xform =
1215 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1217 const struct rte_compressdev_capabilities *capab;
1219 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1220 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1222 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1225 if (compress_xform == NULL) {
1227 "Compress xform could not be created\n");
/* Clone the default compress xform, then force dynamic Huffman. */
1232 memcpy(compress_xform, ts_params->def_comp_xform,
1233 sizeof(struct rte_comp_xform));
1234 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1236 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1237 test_buffer = compress_test_bufs[i];
1239 /* Compress with compressdev, decompress with Zlib */
1240 if (test_deflate_comp_decomp(&test_buffer, 1,
1243 &ts_params->def_decomp_xform,
1245 RTE_COMP_OP_STATELESS,
1247 ZLIB_DECOMPRESS) < 0) {
1252 /* Compress with Zlib, decompress with compressdev */
1253 if (test_deflate_comp_decomp(&test_buffer, 1,
1256 &ts_params->def_decomp_xform,
1258 RTE_COMP_OP_STATELESS,
1260 ZLIB_COMPRESS) < 0) {
1269 rte_free(compress_xform);
/*
 * Submits all test buffers as a single burst of operations (one op per
 * buffer) using the default xforms, in both PMD<->zlib directions.
 */
1274 test_compressdev_deflate_stateless_multi_op(void)
1276 struct comp_testsuite_params *ts_params = &testsuite_params;
1277 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1278 uint16_t buf_idx[num_bufs];
/* Identity mapping: buffer i keeps logical index i in the logs. */
1281 for (i = 0; i < num_bufs; i++)
1284 /* Compress with compressdev, decompress with Zlib */
1285 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1287 &ts_params->def_comp_xform,
1288 &ts_params->def_decomp_xform,
1290 RTE_COMP_OP_STATELESS,
1292 ZLIB_DECOMPRESS) < 0)
1295 /* Compress with Zlib, decompress with compressdev */
1296 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1298 &ts_params->def_comp_xform,
1299 &ts_params->def_decomp_xform,
1301 RTE_COMP_OP_STATELESS,
1306 return TEST_SUCCESS;
/*
 * Round-trips each test buffer once per compression level, sweeping
 * RTE_COMP_LEVEL_MIN..RTE_COMP_LEVEL_MAX (PMD compress, zlib
 * decompress).
 */
1310 test_compressdev_deflate_stateless_multi_level(void)
1312 struct comp_testsuite_params *ts_params = &testsuite_params;
1313 const char *test_buffer;
1317 struct rte_comp_xform *compress_xform =
1318 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1320 if (compress_xform == NULL) {
1322 "Compress xform could not be created\n");
/* Start from the default xform; only the level changes per iteration. */
1327 memcpy(compress_xform, ts_params->def_comp_xform,
1328 sizeof(struct rte_comp_xform));
1330 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1331 test_buffer = compress_test_bufs[i];
1332 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1334 compress_xform->compress.level = level;
1335 /* Compress with compressdev, decompress with Zlib */
1336 if (test_deflate_comp_decomp(&test_buffer, 1,
1339 &ts_params->def_decomp_xform,
1341 RTE_COMP_OP_STATELESS,
1343 ZLIB_DECOMPRESS) < 0) {
1353 rte_free(compress_xform);
1357 #define NUM_XFORMS 3
/*
 * Exercises multiple private xforms in a single burst: NUM_XFORMS
 * compress/decompress xform pairs with differing levels, applied
 * round-robin to NUM_XFORMS copies of the same test buffer.
 */
1359 test_compressdev_deflate_stateless_multi_xform(void)
1361 struct comp_testsuite_params *ts_params = &testsuite_params;
1362 uint16_t num_bufs = NUM_XFORMS;
1363 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1364 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1365 const char *test_buffers[NUM_XFORMS];
1367 unsigned int level = RTE_COMP_LEVEL_MIN;
1368 uint16_t buf_idx[num_bufs];
1372 /* Create multiple xforms with various levels */
1373 for (i = 0; i < NUM_XFORMS; i++) {
1374 compress_xforms[i] = rte_malloc(NULL,
1375 sizeof(struct rte_comp_xform), 0);
1376 if (compress_xforms[i] == NULL) {
1378 "Compress xform could not be created\n");
1383 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1384 sizeof(struct rte_comp_xform));
1385 compress_xforms[i]->compress.level = level;
1388 decompress_xforms[i] = rte_malloc(NULL,
1389 sizeof(struct rte_comp_xform), 0);
1390 if (decompress_xforms[i] == NULL) {
1392 "Decompress xform could not be created\n");
1397 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1398 sizeof(struct rte_comp_xform));
1401 for (i = 0; i < NUM_XFORMS; i++) {
1403 /* Use the same buffer in all sessions */
1404 test_buffers[i] = compress_test_bufs[0];
1406 /* Compress with compressdev, decompress with Zlib */
1407 if (test_deflate_comp_decomp(test_buffers, num_bufs,
1412 RTE_COMP_OP_STATELESS,
1414 ZLIB_DECOMPRESS) < 0) {
/* Cleanup path: free all xforms created above (rte_free(NULL) is a no-op). */
1421 for (i = 0; i < NUM_XFORMS; i++) {
1422 rte_free(compress_xforms[i]);
1423 rte_free(decompress_xforms[i]);
/*
 * Scatter-gather round trip: runs every test buffer through
 * multi-segment mbufs in both PMD<->zlib directions.  Skipped when the
 * device does not support RTE_COMP_FF_OOP_SGL_IN_SGL_OUT.
 */
1430 test_compressdev_deflate_stateless_sgl(void)
1432 struct comp_testsuite_params *ts_params = &testsuite_params;
1434 const char *test_buffer;
1435 const struct rte_compressdev_capabilities *capab;
1437 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1438 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1440 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1443 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1444 test_buffer = compress_test_bufs[i];
1445 /* Compress with compressdev, decompress with Zlib */
1446 if (test_deflate_comp_decomp(&test_buffer, 1,
1448 &ts_params->def_comp_xform,
1449 &ts_params->def_decomp_xform,
1451 RTE_COMP_OP_STATELESS,
1453 ZLIB_DECOMPRESS) < 0)
1456 /* Compress with Zlib, decompress with compressdev */
1457 if (test_deflate_comp_decomp(&test_buffer, 1,
1459 &ts_params->def_comp_xform,
1460 &ts_params->def_decomp_xform,
1462 RTE_COMP_OP_STATELESS,
1468 return TEST_SUCCESS;
/*
 * Test suite descriptor: suite-level setup/teardown plus one entry per
 * test case.  The invalid-configuration test runs without the generic
 * device setup (it configures the device itself); all others use
 * generic_ut_setup/teardown.
 */
1471 static struct unit_test_suite compressdev_testsuite = {
1472 .suite_name = "compressdev unit test suite",
1473 .setup = testsuite_setup,
1474 .teardown = testsuite_teardown,
1475 .unit_test_cases = {
1476 TEST_CASE_ST(NULL, NULL,
1477 test_compressdev_invalid_configuration),
1478 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1479 test_compressdev_deflate_stateless_fixed),
1480 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1481 test_compressdev_deflate_stateless_dynamic),
1482 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1483 test_compressdev_deflate_stateless_multi_op),
1484 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1485 test_compressdev_deflate_stateless_multi_level),
1486 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1487 test_compressdev_deflate_stateless_multi_xform),
1488 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1489 test_compressdev_deflate_stateless_sgl),
1490 TEST_CASES_END() /**< NULL terminate unit test array */
/* Entry point invoked by the DPDK test framework; runs the whole suite. */
1495 test_compressdev(void)
1497 return unit_test_suite_runner(&compressdev_testsuite);
/* Registers the suite as the "compressdev_autotest" test command. */
1500 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);