test/compress: add scatter-gather tests
[dpdk.git] / test / test / test_compressdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
11 #include <rte_mbuf.h>
12 #include <rte_compressdev.h>
13
14 #include "test_compressdev_test_buffer.h"
15 #include "test.h"
16
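/*
 * Integer ceiling division, e.g. DIV_CEIL(300, 256) == 2.
 * Used below to work out how many SEG_SIZE segments are needed
 * when splitting a buffer into a scatter-gather (SGL) mbuf chain.
 */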
17 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
18
19 #define DEFAULT_WINDOW_SIZE 15
20 #define DEFAULT_MEM_LEVEL 8
21 #define MAX_DEQD_RETRIES 10
22 #define DEQUEUE_WAIT_TIME 10000
23
24 /*
25  * 30% extra size for compressed data compared to original data,
26  * in case data size cannot be reduced and it is actually bigger
27  * due to the compress block headers
28  */
29 #define COMPRESS_BUF_SIZE_RATIO 1.3
30 #define NUM_LARGE_MBUFS 16
31 #define SEG_SIZE 256
32 #define NUM_OPS 16
33 #define NUM_MAX_XFORMS 16
34 #define NUM_MAX_INFLIGHT_OPS 128
35 #define CACHE_SIZE 0
36
37 const char *
38 huffman_type_strings[] = {
39         [RTE_COMP_HUFFMAN_DEFAULT]      = "PMD default",
40         [RTE_COMP_HUFFMAN_FIXED]        = "Fixed",
41         [RTE_COMP_HUFFMAN_DYNAMIC]      = "Dynamic"
42 };
43
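/*
 * Selects which direction is handled by Zlib rather than compressdev:
 * ZLIB_COMPRESS compresses with Zlib and decompresses with compressdev,
 * ZLIB_DECOMPRESS does the opposite, ZLIB_ALL uses Zlib for both and
 * ZLIB_NONE uses compressdev for both directions.
 */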
44 enum zlib_direction {
45         ZLIB_NONE,
46         ZLIB_COMPRESS,
47         ZLIB_DECOMPRESS,
48         ZLIB_ALL
49 };
50
51 struct priv_op_data {
52         uint16_t orig_idx;
53 };
54
55 struct comp_testsuite_params {
56         struct rte_mempool *large_mbuf_pool;
57         struct rte_mempool *small_mbuf_pool;
58         struct rte_mempool *op_pool;
59         struct rte_comp_xform *def_comp_xform;
60         struct rte_comp_xform *def_decomp_xform;
61 };
62
63 static struct comp_testsuite_params testsuite_params = { 0 };
64
65 static void
66 testsuite_teardown(void)
67 {
68         struct comp_testsuite_params *ts_params = &testsuite_params;
69
70         rte_mempool_free(ts_params->large_mbuf_pool);
71         rte_mempool_free(ts_params->small_mbuf_pool);
72         rte_mempool_free(ts_params->op_pool);
73         rte_free(ts_params->def_comp_xform);
74         rte_free(ts_params->def_decomp_xform);
75 }
76
77 static int
78 testsuite_setup(void)
79 {
80         struct comp_testsuite_params *ts_params = &testsuite_params;
81         uint32_t max_buf_size = 0;
82         unsigned int i;
83
84         if (rte_compressdev_count() == 0) {
85                 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
86                 return TEST_FAILED;
87         }
88
89         RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
90                                 rte_compressdev_name_get(0));
91
92         for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
93                 max_buf_size = RTE_MAX(max_buf_size,
94                                 strlen(compress_test_bufs[i]) + 1);
95
96         /*
97          * Buffers to be used in compression and decompression.
98          * Since decompressed data might be larger than
99          * compressed data (due to block header),
100          * buffers should be big enough for both cases.
101          */
102         max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
103         ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
104                         NUM_LARGE_MBUFS,
105                         CACHE_SIZE, 0,
106                         max_buf_size + RTE_PKTMBUF_HEADROOM,
107                         rte_socket_id());
108         if (ts_params->large_mbuf_pool == NULL) {
109                 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
110                 return TEST_FAILED;
111         }
112
113         /* Create mempool with smaller buffers for SGL testing */
114         uint16_t max_segs_per_buf = DIV_CEIL(max_buf_size, SEG_SIZE);
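            /*
             * Each SGL buffer is split into SEG_SIZE-byte segments, so a
             * single test buffer may need up to max_segs_per_buf chained
             * mbufs; size the pool for NUM_LARGE_MBUFS such chains.
             */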
115
116         ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
117                         NUM_LARGE_MBUFS * max_segs_per_buf,
118                         CACHE_SIZE, 0,
119                         SEG_SIZE + RTE_PKTMBUF_HEADROOM,
120                         rte_socket_id());
121         if (ts_params->small_mbuf_pool == NULL) {
122                 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
123                 goto exit;
124         }
125
126         ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
127                                 0, sizeof(struct priv_op_data),
128                                 rte_socket_id());
129         if (ts_params->op_pool == NULL) {
130                 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
131                 goto exit;
132         }
133
134         ts_params->def_comp_xform =
135                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
136         if (ts_params->def_comp_xform == NULL) {
137                 RTE_LOG(ERR, USER1,
138                         "Default compress xform could not be created\n");
139                 goto exit;
140         }
141         ts_params->def_decomp_xform =
142                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
143         if (ts_params->def_decomp_xform == NULL) {
144                 RTE_LOG(ERR, USER1,
145                         "Default decompress xform could not be created\n");
146                 goto exit;
147         }
148
149         /* Initializes default values for compress/decompress xforms */
150         ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
151         ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
152         ts_params->def_comp_xform->compress.deflate.huffman =
153                                                 RTE_COMP_HUFFMAN_DEFAULT;
154         ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
155         ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
156         ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
157
158         ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
159         ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
160         ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
161         ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
162
163         return TEST_SUCCESS;
164
165 exit:
166         testsuite_teardown();
167
168         return TEST_FAILED;
169 }
170
171 static int
172 generic_ut_setup(void)
173 {
174         /* Configure compressdev (one device, one queue pair) */
175         struct rte_compressdev_config config = {
176                 .socket_id = rte_socket_id(),
177                 .nb_queue_pairs = 1,
178                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
179                 .max_nb_streams = 0
180         };
181
182         if (rte_compressdev_configure(0, &config) < 0) {
183                 RTE_LOG(ERR, USER1, "Device configuration failed\n");
184                 return -1;
185         }
186
187         if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
188                         rte_socket_id()) < 0) {
189                 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
190                 return -1;
191         }
192
193         if (rte_compressdev_start(0) < 0) {
194                 RTE_LOG(ERR, USER1, "Device could not be started\n");
195                 return -1;
196         }
197
198         return 0;
199 }
200
201 static void
202 generic_ut_teardown(void)
203 {
204         rte_compressdev_stop(0);
205         if (rte_compressdev_close(0) < 0)
206                 RTE_LOG(ERR, USER1, "Device could not be closed\n");
207 }
208
209 static int
210 test_compressdev_invalid_configuration(void)
211 {
212         struct rte_compressdev_config invalid_config;
213         struct rte_compressdev_config valid_config = {
214                 .socket_id = rte_socket_id(),
215                 .nb_queue_pairs = 1,
216                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
217                 .max_nb_streams = 0
218         };
219         struct rte_compressdev_info dev_info;
220
221         /* Invalid configuration with 0 queue pairs */
222         memcpy(&invalid_config, &valid_config,
223                         sizeof(struct rte_compressdev_config));
224         invalid_config.nb_queue_pairs = 0;
225
226         TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
227                         "Device configuration was successful "
228                         "with no queue pairs (invalid)\n");
229
230         /*
231          * Invalid configuration with too many queue pairs
232          * (if there is an actual maximum number of queue pairs)
233          */
234         rte_compressdev_info_get(0, &dev_info);
235         if (dev_info.max_nb_queue_pairs != 0) {
236                 memcpy(&invalid_config, &valid_config,
237                         sizeof(struct rte_compressdev_config));
238                 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
239
240                 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
241                                 "Device configuration was successful "
242                                 "with too many queue pairs (invalid)\n");
243         }
244
245         /* Invalid queue pair setup, when no queue pairs are configured */
246         TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
247                                 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
248                         "Queue pair setup was successful "
249                         "with no queue pairs set (invalid)\n");
250
251         return TEST_SUCCESS;
252 }
253
254 static int
255 compare_buffers(const char *buffer1, uint32_t buffer1_len,
256                 const char *buffer2, uint32_t buffer2_len)
257 {
258         if (buffer1_len != buffer2_len) {
259                 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
260                 return -1;
261         }
262
263         if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
264                 RTE_LOG(ERR, USER1, "Buffers are different\n");
265                 return -1;
266         }
267
268         return 0;
269 }
270
271 /*
272  * Maps compressdev and Zlib flush flags
273  */
274 static int
275 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
276 {
277         switch (flag) {
278         case RTE_COMP_FLUSH_NONE:
279                 return Z_NO_FLUSH;
280         case RTE_COMP_FLUSH_SYNC:
281                 return Z_SYNC_FLUSH;
282         case RTE_COMP_FLUSH_FULL:
283                 return Z_FULL_FLUSH;
284         case RTE_COMP_FLUSH_FINAL:
285                 return Z_FINISH;
286         /*
287          * There should be only the values above,
288          * so this should never happen
289          */
290         default:
291                 return -1;
292         }
293 }
294
295 static int
296 compress_zlib(struct rte_comp_op *op,
297                 const struct rte_comp_xform *xform, int mem_level)
298 {
299         z_stream stream;
300         int zlib_flush;
301         int strategy, window_bits, comp_level;
302         int ret = TEST_FAILED;
303         uint8_t *single_src_buf = NULL;
304         uint8_t *single_dst_buf = NULL;
305
306         /* initialize zlib stream */
307         stream.zalloc = Z_NULL;
308         stream.zfree = Z_NULL;
309         stream.opaque = Z_NULL;
310
311         if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
312                 strategy = Z_FIXED;
313         else
314                 strategy = Z_DEFAULT_STRATEGY;
315
316         /*
317          * Window bits is the base two logarithm of the window size (in bytes).
318          * When doing raw DEFLATE, this number will be negative.
319          */
320         window_bits = -(xform->compress.window_size);
321
322         comp_level = xform->compress.level;
323
324         if (comp_level != RTE_COMP_LEVEL_NONE)
325                 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
326                         window_bits, mem_level, strategy);
327         else
328                 ret = deflateInit(&stream, Z_NO_COMPRESSION);
329
330         if (ret != Z_OK) {
331                 printf("Zlib deflate could not be initialized\n");
332                 goto exit;
333         }
334
335         /* Assuming stateless operation */
336         /* SGL */
337         if (op->m_src->nb_segs > 1) {
338                 single_src_buf = rte_malloc(NULL,
339                                 rte_pktmbuf_pkt_len(op->m_src), 0);
340                 if (single_src_buf == NULL) {
341                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
342                         goto exit;
343                 }
344                 single_dst_buf = rte_malloc(NULL,
345                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
346                 if (single_dst_buf == NULL) {
347                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
348                         goto exit;
349                 }
350                 if (rte_pktmbuf_read(op->m_src, 0,
351                                         rte_pktmbuf_pkt_len(op->m_src),
352                                         single_src_buf) == NULL) {
353                         RTE_LOG(ERR, USER1,
354                                 "Buffer could not be read entirely\n");
355                         goto exit;
356                 }
357
358                 stream.avail_in = op->src.length;
359                 stream.next_in = single_src_buf;
360                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
361                 stream.next_out = single_dst_buf;
362
363         } else {
364                 stream.avail_in = op->src.length;
365                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
366                 stream.avail_out = op->m_dst->data_len;
367                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
368         }
369         /* Stateless operation, the whole buffer will be compressed in one go */
370         zlib_flush = map_zlib_flush_flag(op->flush_flag);
371         ret = deflate(&stream, zlib_flush);
372
373         if (stream.avail_in != 0) {
374                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
375                 goto exit;
376         }
377
378         if (ret != Z_STREAM_END)
379                 goto exit;
380
381         /* Copy data to destination SGL */
382         if (op->m_src->nb_segs > 1) {
383                 uint32_t remaining_data = stream.total_out;
384                 uint8_t *src_data = single_dst_buf;
385                 struct rte_mbuf *dst_buf = op->m_dst;
386
387                 while (remaining_data > 0) {
388                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
389                                         uint8_t *);
390                         /* Last segment */
391                         if (remaining_data < dst_buf->data_len) {
392                                 memcpy(dst_data, src_data, remaining_data);
393                                 remaining_data = 0;
394                         } else {
395                                 memcpy(dst_data, src_data, dst_buf->data_len);
396                                 remaining_data -= dst_buf->data_len;
397                                 src_data += dst_buf->data_len;
398                                 dst_buf = dst_buf->next;
399                         }
400                 }
401         }
402
403         op->consumed = stream.total_in;
404         op->produced = stream.total_out;
405         op->status = RTE_COMP_OP_STATUS_SUCCESS;
406
407         deflateReset(&stream);
408
409         ret = 0;
410 exit:
411         deflateEnd(&stream);
412         rte_free(single_src_buf);
413         rte_free(single_dst_buf);
414
415         return ret;
416 }
417
418 static int
419 decompress_zlib(struct rte_comp_op *op,
420                 const struct rte_comp_xform *xform)
421 {
422         z_stream stream;
423         int window_bits;
424         int zlib_flush;
425         int ret = TEST_FAILED;
426         uint8_t *single_src_buf = NULL;
427         uint8_t *single_dst_buf = NULL;
428
429         /* initialize zlib stream */
430         stream.zalloc = Z_NULL;
431         stream.zfree = Z_NULL;
432         stream.opaque = Z_NULL;
433
434         /*
435          * Window bits is the base two logarithm of the window size (in bytes).
436          * When doing raw DEFLATE, this number will be negative.
437          */
438         window_bits = -(xform->decompress.window_size);
439
440         ret = inflateInit2(&stream, window_bits);
441
442         if (ret != Z_OK) {
443                 printf("Zlib inflate could not be initialized\n");
444                 goto exit;
445         }
446
447         /* Assuming stateless operation */
448         /* SGL */
449         if (op->m_src->nb_segs > 1) {
450                 single_src_buf = rte_malloc(NULL,
451                                 rte_pktmbuf_pkt_len(op->m_src), 0);
452                 if (single_src_buf == NULL) {
453                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
454                         goto exit;
455                 }
456                 single_dst_buf = rte_malloc(NULL,
457                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
458                 if (single_dst_buf == NULL) {
459                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
460                         goto exit;
461                 }
462                 if (rte_pktmbuf_read(op->m_src, 0,
463                                         rte_pktmbuf_pkt_len(op->m_src),
464                                         single_src_buf) == NULL) {
465                         RTE_LOG(ERR, USER1,
466                                 "Buffer could not be read entirely\n");
467                         goto exit;
468                 }
469
470                 stream.avail_in = op->src.length;
471                 stream.next_in = single_src_buf;
472                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
473                 stream.next_out = single_dst_buf;
474
475         } else {
476                 stream.avail_in = op->src.length;
477                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
478                 stream.avail_out = op->m_dst->data_len;
479                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
480         }
481
482         /* Stateless operation, the whole buffer will be decompressed in one go */
483         zlib_flush = map_zlib_flush_flag(op->flush_flag);
484         ret = inflate(&stream, zlib_flush);
485
486         if (stream.avail_in != 0) {
487                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
488                 goto exit;
489         }
490
491         if (ret != Z_STREAM_END)
492                 goto exit;
493
494         if (op->m_src->nb_segs > 1) {
495                 uint32_t remaining_data = stream.total_out;
496                 uint8_t *src_data = single_dst_buf;
497                 struct rte_mbuf *dst_buf = op->m_dst;
498
499                 while (remaining_data > 0) {
500                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
501                                         uint8_t *);
502                         /* Last segment */
503                         if (remaining_data < dst_buf->data_len) {
504                                 memcpy(dst_data, src_data, remaining_data);
505                                 remaining_data = 0;
506                         } else {
507                                 memcpy(dst_data, src_data, dst_buf->data_len);
508                                 remaining_data -= dst_buf->data_len;
509                                 src_data += dst_buf->data_len;
510                                 dst_buf = dst_buf->next;
511                         }
512                 }
513         }
514
515         op->consumed = stream.total_in;
516         op->produced = stream.total_out;
517         op->status = RTE_COMP_OP_STATUS_SUCCESS;
518
519         inflateReset(&stream);
520
521         ret = 0;
522 exit:
523         inflateEnd(&stream);
524
525         return ret;
526 }
527
528 static int
529 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
530                 uint32_t total_data_size,
531                 struct rte_mempool *pool)
532 {
533         uint32_t remaining_data = total_data_size;
534         uint16_t num_remaining_segs =
535                         DIV_CEIL(remaining_data, SEG_SIZE);
536         struct rte_mbuf *next_seg;
537         uint32_t data_size;
538         char *buf_ptr;
539         const char *data_ptr = test_buf;
540         unsigned int i;
541         int ret;
542
543         /*
544          * Allocate data in the first segment (header) and
545          * copy data if test buffer is provided
546          */
547         if (remaining_data < SEG_SIZE)
548                 data_size = remaining_data;
549         else
550                 data_size = SEG_SIZE;
551         buf_ptr = rte_pktmbuf_append(head_buf, data_size);
552         if (buf_ptr == NULL) {
553                 RTE_LOG(ERR, USER1,
554                         "Not enough space in the buffer\n");
555                 return -1;
556         }
557
558         if (data_ptr != NULL) {
559                 /* Copy characters without NULL terminator */
560                 strncpy(buf_ptr, data_ptr, data_size);
561                 data_ptr += data_size;
562         }
563         remaining_data -= data_size;
            num_remaining_segs--;
564
565         /*
566          * Allocate the rest of the segments,
567          * copy the rest of the data and chain the segments.
568          */
569         for (i = 0; i < num_remaining_segs; i++) {
570                 next_seg = rte_pktmbuf_alloc(pool);
571                 if (next_seg == NULL) {
572                         RTE_LOG(ERR, USER1,
573                                 "New segment could not be allocated "
574                                 "from the mempool\n");
575                         return -1;
576                 }
577                 if (remaining_data < SEG_SIZE)
578                         data_size = remaining_data;
579                 else
580                         data_size = SEG_SIZE;
581                 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
582                 if (buf_ptr == NULL) {
583                         RTE_LOG(ERR, USER1,
584                                 "Not enough space in the buffer\n");
585                         rte_pktmbuf_free(next_seg);
586                         return -1;
587                 }
588                 if (data_ptr != NULL) {
589                         /* Copy characters without NULL terminator */
590                         strncpy(buf_ptr, data_ptr, data_size);
591                         data_ptr += data_size;
592                 }
593                 remaining_data -= data_size;
594
595                 ret = rte_pktmbuf_chain(head_buf, next_seg);
596                 if (ret != 0) {
597                         rte_pktmbuf_free(next_seg);
598                         RTE_LOG(ERR, USER1,
599                                 "Segment could not be chained\n");
600                         return -1;
601                 }
602         }
603
604         return 0;
605 }
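/*
 * Usage sketch (illustrative only, not called by the tests), assuming a
 * test string "test_buf" and the suite's small mbuf pool: the head mbuf is
 * allocated separately and prepare_sgl_bufs() appends to it and chains as
 * many SEG_SIZE segments as the data needs.
 *
 *	struct rte_mbuf *head = rte_pktmbuf_alloc(ts_params->small_mbuf_pool);
 *	uint32_t len = strlen(test_buf) + 1;
 *
 *	if (head == NULL ||
 *	    prepare_sgl_bufs(test_buf, head, len,
 *				ts_params->small_mbuf_pool) < 0)
 *		return -1;
 *
 * On success, head carries the whole string split across its segments
 * (rte_pktmbuf_pkt_len(head) == len).
 */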
606
607 /*
608  * Compresses and decompresses buffer with compressdev API and Zlib API
609  */
610 static int
611 test_deflate_comp_decomp(const char * const test_bufs[],
612                 unsigned int num_bufs,
613                 uint16_t buf_idx[],
614                 struct rte_comp_xform *compress_xforms[],
615                 struct rte_comp_xform *decompress_xforms[],
616                 unsigned int num_xforms,
617                 enum rte_comp_op_type state,
618                 unsigned int sgl,
619                 enum zlib_direction zlib_dir)
620 {
621         struct comp_testsuite_params *ts_params = &testsuite_params;
622         int ret_status = -1;
623         int ret;
624         struct rte_mbuf *uncomp_bufs[num_bufs];
625         struct rte_mbuf *comp_bufs[num_bufs];
626         struct rte_comp_op *ops[num_bufs];
627         struct rte_comp_op *ops_processed[num_bufs];
628         void *priv_xforms[num_bufs];
629         uint16_t num_enqd, num_deqd, num_total_deqd;
630         uint16_t num_priv_xforms = 0;
631         unsigned int deqd_retries = 0;
632         struct priv_op_data *priv_data;
633         char *buf_ptr;
634         unsigned int i;
635         struct rte_mempool *buf_pool;
636         uint32_t data_size;
637         const struct rte_compressdev_capabilities *capa =
638                 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
639         char *contig_buf = NULL;
640
641         /* Initialize all arrays to NULL */
642         memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
643         memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
644         memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
645         memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
646         memset(priv_xforms, 0, sizeof(void *) * num_bufs);
647
648         if (sgl)
649                 buf_pool = ts_params->small_mbuf_pool;
650         else
651                 buf_pool = ts_params->large_mbuf_pool;
652
653         /* Prepare the source mbufs with the data */
654         ret = rte_pktmbuf_alloc_bulk(buf_pool,
655                                 uncomp_bufs, num_bufs);
656         if (ret < 0) {
657                 RTE_LOG(ERR, USER1,
658                         "Source mbufs could not be allocated "
659                         "from the mempool\n");
660                 goto exit;
661         }
662
663         if (sgl) {
664                 for (i = 0; i < num_bufs; i++) {
665                         data_size = strlen(test_bufs[i]) + 1;
666                         if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
667                                         data_size,
668                                         buf_pool) < 0)
669                                 goto exit;
670                 }
671         } else {
672                 for (i = 0; i < num_bufs; i++) {
673                         data_size = strlen(test_bufs[i]) + 1;
674                         buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
675                         snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
676                 }
677         }
678
679         /* Prepare the destination mbufs */
680         ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
681         if (ret < 0) {
682                 RTE_LOG(ERR, USER1,
683                         "Destination mbufs could not be allocated "
684                         "from the mempool\n");
685                 goto exit;
686         }
687
688         if (sgl) {
689                 for (i = 0; i < num_bufs; i++) {
690                         data_size = strlen(test_bufs[i]) *
691                                 COMPRESS_BUF_SIZE_RATIO;
692                         if (prepare_sgl_bufs(NULL, comp_bufs[i],
693                                         data_size,
694                                         buf_pool) < 0)
695                                 goto exit;
696                 }
697
698         } else {
699                 for (i = 0; i < num_bufs; i++) {
700                         data_size = strlen(test_bufs[i]) *
701                                 COMPRESS_BUF_SIZE_RATIO;
702                         rte_pktmbuf_append(comp_bufs[i], data_size);
703                 }
704         }
705
706         /* Build the compression operations */
707         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
708         if (ret < 0) {
709                 RTE_LOG(ERR, USER1,
710                         "Compress operations could not be allocated "
711                         "from the mempool\n");
712                 goto exit;
713         }
714
715         for (i = 0; i < num_bufs; i++) {
716                 ops[i]->m_src = uncomp_bufs[i];
717                 ops[i]->m_dst = comp_bufs[i];
718                 ops[i]->src.offset = 0;
719                 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
720                 ops[i]->dst.offset = 0;
721                 if (state == RTE_COMP_OP_STATELESS) {
722                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
723                 } else {
724                         RTE_LOG(ERR, USER1,
725                                 "Stateful operations are not supported "
726                                 "in these tests yet\n");
727                         goto exit;
728                 }
729                 ops[i]->input_chksum = 0;
730                 /*
731                  * Store the original operation index in the private data,
732                  * since ordering is not guaranteed when dequeueing from
733                  * compressdev, so the results can be matched against the
734                  * right source buffer at the end of the test.
735                  */
736                 priv_data = (struct priv_op_data *) (ops[i] + 1);
737                 priv_data->orig_idx = i;
738         }
739
740         /* Compress data (either with Zlib API or compressdev API) */
741         if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
742                 for (i = 0; i < num_bufs; i++) {
743                         const struct rte_comp_xform *compress_xform =
744                                 compress_xforms[i % num_xforms];
745                         ret = compress_zlib(ops[i], compress_xform,
746                                         DEFAULT_MEM_LEVEL);
747                         if (ret < 0)
748                                 goto exit;
749
750                         ops_processed[i] = ops[i];
751                 }
752         } else {
753                 /* Create compress private xform data */
754                 for (i = 0; i < num_xforms; i++) {
755                         ret = rte_compressdev_private_xform_create(0,
756                                 (const struct rte_comp_xform *)compress_xforms[i],
757                                 &priv_xforms[i]);
758                         if (ret < 0) {
759                                 RTE_LOG(ERR, USER1,
760                                         "Compression private xform "
761                                         "could not be created\n");
762                                 goto exit;
763                         }
764                         num_priv_xforms++;
765                 }
766
767                 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
768                         /* Attach shareable private xform data to ops */
769                         for (i = 0; i < num_bufs; i++)
770                                 ops[i]->private_xform = priv_xforms[i % num_xforms];
771                 } else {
772                         /* Create rest of the private xforms for the other ops */
773                         for (i = num_xforms; i < num_bufs; i++) {
774                                 ret = rte_compressdev_private_xform_create(0,
775                                         compress_xforms[i % num_xforms],
776                                         &priv_xforms[i]);
777                                 if (ret < 0) {
778                                         RTE_LOG(ERR, USER1,
779                                                 "Compression private xform "
780                                                 "could not be created\n");
781                                         goto exit;
782                                 }
783                                 num_priv_xforms++;
784                         }
785
786                         /* Attach non shareable private xform data to ops */
787                         for (i = 0; i < num_bufs; i++)
788                                 ops[i]->private_xform = priv_xforms[i];
789                 }
790
791                 /* Enqueue and dequeue all operations */
792                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
793                 if (num_enqd < num_bufs) {
794                         RTE_LOG(ERR, USER1,
795                                 "The operations could not be enqueued\n");
796                         goto exit;
797                 }
798
799                 num_total_deqd = 0;
800                 do {
801                         /*
802                          * If retrying a dequeue call, wait for 10 ms to give
803                          * the driver enough time to process the operations
804                          */
805                         if (deqd_retries != 0) {
806                                 /*
807                                  * Avoid infinite loop if not all the
808                                  * operations get out of the device
809                                  */
810                                 if (deqd_retries == MAX_DEQD_RETRIES) {
811                                         RTE_LOG(ERR, USER1,
812                                                 "Not all operations could be "
813                                                 "dequeued\n");
814                                         goto exit;
815                                 }
816                                 usleep(DEQUEUE_WAIT_TIME);
817                         }
818                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
819                                         &ops_processed[num_total_deqd], num_bufs);
820                         num_total_deqd += num_deqd;
821                         deqd_retries++;
822                 } while (num_total_deqd < num_enqd);
823
824                 deqd_retries = 0;
825
826                 /* Free compress private xforms */
827                 for (i = 0; i < num_priv_xforms; i++) {
828                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
829                         priv_xforms[i] = NULL;
830                 }
831                 num_priv_xforms = 0;
832         }
833
834         for (i = 0; i < num_bufs; i++) {
835                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
836                 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
837                 const struct rte_comp_compress_xform *compress_xform =
838                                 &compress_xforms[xform_idx]->compress;
839                 enum rte_comp_huffman huffman_type =
840                         compress_xform->deflate.huffman;
841                 RTE_LOG(DEBUG, USER1, "Buffer %u compressed from %u to %u bytes "
842                         "(level = %d, huffman = %s)\n",
843                         buf_idx[priv_data->orig_idx],
844                         ops_processed[i]->consumed, ops_processed[i]->produced,
845                         compress_xform->level,
846                         huffman_type_strings[huffman_type]);
847                 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
848                         (float)ops_processed[i]->produced /
849                         ops_processed[i]->consumed * 100);
850                 ops[i] = NULL;
851         }
852
853         /*
854          * Check operation status and free source mbufs (destination mbuf and
855          * compress operation information is needed for the decompression stage)
856          */
857         for (i = 0; i < num_bufs; i++) {
858                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
859                         RTE_LOG(ERR, USER1,
860                                 "Some operations were not successful\n");
861                         goto exit;
862                 }
863                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
864                 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
865                 uncomp_bufs[priv_data->orig_idx] = NULL;
866         }
867
868         /* Allocate buffers for decompressed data */
869         ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
870         if (ret < 0) {
871                 RTE_LOG(ERR, USER1,
872                         "Destination mbufs could not be allocated "
873                         "from the mempool\n");
874                 goto exit;
875         }
876
877         if (sgl) {
878                 for (i = 0; i < num_bufs; i++) {
879                         priv_data = (struct priv_op_data *)
880                                         (ops_processed[i] + 1);
881                         data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
882                         if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
883                                         data_size, buf_pool) < 0)
884                                 goto exit;
885                 }
886
887         } else {
888                 for (i = 0; i < num_bufs; i++) {
889                         priv_data = (struct priv_op_data *)
890                                         (ops_processed[i] + 1);
891                         data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
892                         rte_pktmbuf_append(uncomp_bufs[i], data_size);
893                 }
894         }
895
896         /* Build the decompression operations */
897         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
898         if (ret < 0) {
899                 RTE_LOG(ERR, USER1,
900                         "Decompress operations could not be allocated "
901                         "from the mempool\n");
902                 goto exit;
903         }
904
905         /* Source buffer is the compressed data from the previous operations */
906         for (i = 0; i < num_bufs; i++) {
907                 ops[i]->m_src = ops_processed[i]->m_dst;
908                 ops[i]->m_dst = uncomp_bufs[i];
909                 ops[i]->src.offset = 0;
910                 /*
911                  * Set the length of the compressed data to the
912                  * number of bytes that were produced in the previous stage
913                  */
914                 ops[i]->src.length = ops_processed[i]->produced;
915                 ops[i]->dst.offset = 0;
916                 if (state == RTE_COMP_OP_STATELESS) {
917                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
918                 } else {
919                         RTE_LOG(ERR, USER1,
920                                 "Stateful operations are not supported "
921                                 "in these tests yet\n");
922                         goto exit;
923                 }
924                 ops[i]->input_chksum = 0;
925                 /*
926                  * Copy private data from previous operations,
927                  * to keep the pointer to the original buffer
928                  */
929                 memcpy(ops[i] + 1, ops_processed[i] + 1,
930                                 sizeof(struct priv_op_data));
931         }
932
933         /*
934          * Free the previous compress operations,
935          * as they are not needed anymore
936          */
937         for (i = 0; i < num_bufs; i++) {
938                 rte_comp_op_free(ops_processed[i]);
939                 ops_processed[i] = NULL;
940         }
941
942         /* Decompress data (either with Zlib API or compressdev API) */
943         if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
944                 for (i = 0; i < num_bufs; i++) {
945                         priv_data = (struct priv_op_data *)(ops[i] + 1);
946                         uint16_t xform_idx = priv_data->orig_idx % num_xforms;
947                         const struct rte_comp_xform *decompress_xform =
948                                 decompress_xforms[xform_idx];
949
950                         ret = decompress_zlib(ops[i], decompress_xform);
951                         if (ret < 0)
952                                 goto exit;
953
954                         ops_processed[i] = ops[i];
955                 }
956         } else {
957                 /* Create decompress private xform data */
958                 for (i = 0; i < num_xforms; i++) {
959                         ret = rte_compressdev_private_xform_create(0,
960                                 (const struct rte_comp_xform *)decompress_xforms[i],
961                                 &priv_xforms[i]);
962                         if (ret < 0) {
963                                 RTE_LOG(ERR, USER1,
964                                         "Decompression private xform "
965                                         "could not be created\n");
966                                 goto exit;
967                         }
968                         num_priv_xforms++;
969                 }
970
971                 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
972                         /* Attach shareable private xform data to ops */
973                         for (i = 0; i < num_bufs; i++) {
974                                 priv_data = (struct priv_op_data *)(ops[i] + 1);
975                                 uint16_t xform_idx = priv_data->orig_idx %
976                                                                 num_xforms;
977                                 ops[i]->private_xform = priv_xforms[xform_idx];
978                         }
979                 } else {
980                         /* Create rest of the private xforms for the other ops */
981                         for (i = num_xforms; i < num_bufs; i++) {
982                                 ret = rte_compressdev_private_xform_create(0,
983                                         decompress_xforms[i % num_xforms],
984                                         &priv_xforms[i]);
985                                 if (ret < 0) {
986                                         RTE_LOG(ERR, USER1,
987                                                 "Decompression private xform "
988                                                 "could not be created\n");
989                                         goto exit;
990                                 }
991                                 num_priv_xforms++;
992                         }
993
994                         /* Attach non shareable private xform data to ops */
995                         for (i = 0; i < num_bufs; i++) {
996                                 priv_data = (struct priv_op_data *)(ops[i] + 1);
997                                 uint16_t xform_idx = priv_data->orig_idx;
998                                 ops[i]->private_xform = priv_xforms[xform_idx];
999                         }
1000                 }
1001
1002                 /* Enqueue and dequeue all operations */
1003                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1004                 if (num_enqd < num_bufs) {
1005                         RTE_LOG(ERR, USER1,
1006                                 "The operations could not be enqueued\n");
1007                         goto exit;
1008                 }
1009
1010                 num_total_deqd = 0;
1011                 do {
1012                         /*
1013                          * If retrying a dequeue call, wait for 10 ms to give
1014                          * the driver enough time to process the operations
1015                          */
1016                         if (deqd_retries != 0) {
1017                                 /*
1018                                  * Avoid infinite loop if not all the
1019                                  * operations get out of the device
1020                                  */
1021                                 if (deqd_retries == MAX_DEQD_RETRIES) {
1022                                         RTE_LOG(ERR, USER1,
1023                                                 "Not all operations could be "
1024                                                 "dequeued\n");
1025                                         goto exit;
1026                                 }
1027                                 usleep(DEQUEUE_WAIT_TIME);
1028                         }
1029                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
1030                                         &ops_processed[num_total_deqd], num_bufs);
1031                         num_total_deqd += num_deqd;
1032                         deqd_retries++;
1033                 } while (num_total_deqd < num_enqd);
1034
1035                 deqd_retries = 0;
1036         }
1037
1038         for (i = 0; i < num_bufs; i++) {
1039                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1040                 RTE_LOG(DEBUG, USER1, "Buffer %u decompressed from %u to %u bytes\n",
1041                         buf_idx[priv_data->orig_idx],
1042                         ops_processed[i]->consumed, ops_processed[i]->produced);
1043                 ops[i] = NULL;
1044         }
1045
1046         /*
1047          * Check operation status and free source mbuf (destination mbuf and
1048          * compress operation information is still needed)
1049          */
1050         for (i = 0; i < num_bufs; i++) {
1051                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1052                         RTE_LOG(ERR, USER1,
1053                                 "Some operations were not successful\n");
1054                         goto exit;
1055                 }
1056                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1057                 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1058                 comp_bufs[priv_data->orig_idx] = NULL;
1059         }
1060
1061         /*
1062          * Compare the original stream with the decompressed stream
1063          * (in size and the data)
1064          */
1065         for (i = 0; i < num_bufs; i++) {
1066                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1067                 const char *buf1 = test_bufs[priv_data->orig_idx];
1068                 const char *buf2;
1069                 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1070                 if (contig_buf == NULL) {
1071                         RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1072                                         "be allocated\n");
1073                         goto exit;
1074                 }
1075
1076                 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1077                                 ops_processed[i]->produced, contig_buf);
1078
1079                 if (compare_buffers(buf1, strlen(buf1) + 1,
1080                                 buf2, ops_processed[i]->produced) < 0)
1081                         goto exit;
1082
1083                 rte_free(contig_buf);
1084                 contig_buf = NULL;
1085         }
1086
1087         ret_status = 0;
1088
1089 exit:
1090         /* Free resources */
1091         for (i = 0; i < num_bufs; i++) {
1092                 rte_pktmbuf_free(uncomp_bufs[i]);
1093                 rte_pktmbuf_free(comp_bufs[i]);
1094                 rte_comp_op_free(ops[i]);
1095                 rte_comp_op_free(ops_processed[i]);
1096         }
1097         for (i = 0; i < num_priv_xforms; i++) {
1098                 if (priv_xforms[i] != NULL)
1099                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
1100         }
1101         rte_free(contig_buf);
1102
1103         return ret_status;
1104 }
1105
1106 static int
1107 test_compressdev_deflate_stateless_fixed(void)
1108 {
1109         struct comp_testsuite_params *ts_params = &testsuite_params;
1110         const char *test_buffer;
1111         uint16_t i;
1112         int ret;
1113         const struct rte_compressdev_capabilities *capab;
1114
1115         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1116         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1117
1118         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1119                 return -ENOTSUP;
1120
1121         struct rte_comp_xform *compress_xform =
1122                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1123
1124         if (compress_xform == NULL) {
1125                 RTE_LOG(ERR, USER1,
1126                         "Compress xform could not be created\n");
1127                 ret = TEST_FAILED;
1128                 goto exit;
1129         }
1130
1131         memcpy(compress_xform, ts_params->def_comp_xform,
1132                         sizeof(struct rte_comp_xform));
1133         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1134
1135         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1136                 test_buffer = compress_test_bufs[i];
1137
1138                 /* Compress with compressdev, decompress with Zlib */
1139                 if (test_deflate_comp_decomp(&test_buffer, 1,
1140                                 &i,
1141                                 &compress_xform,
1142                                 &ts_params->def_decomp_xform,
1143                                 1,
1144                                 RTE_COMP_OP_STATELESS,
1145                                 0,
1146                                 ZLIB_DECOMPRESS) < 0) {
1147                         ret = TEST_FAILED;
1148                         goto exit;
1149                 }
1150
1151                 /* Compress with Zlib, decompress with compressdev */
1152                 if (test_deflate_comp_decomp(&test_buffer, 1,
1153                                 &i,
1154                                 &compress_xform,
1155                                 &ts_params->def_decomp_xform,
1156                                 1,
1157                                 RTE_COMP_OP_STATELESS,
1158                                 0,
1159                                 ZLIB_COMPRESS) < 0) {
1160                         ret = TEST_FAILED;
1161                         goto exit;
1162                 }
1163         }
1164
1165         ret = TEST_SUCCESS;
1166
1167 exit:
1168         rte_free(compress_xform);
1169         return ret;
1170 }
1171
1172 static int
1173 test_compressdev_deflate_stateless_dynamic(void)
1174 {
1175         struct comp_testsuite_params *ts_params = &testsuite_params;
1176         const char *test_buffer;
1177         uint16_t i;
1178         int ret;
1179         const struct rte_compressdev_capabilities *capab;
1180
1181         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1182         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1183
1184         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1185                 return -ENOTSUP;
1186
1187         struct rte_comp_xform *compress_xform =
1188                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1189
1190         if (compress_xform == NULL) {
1191                 RTE_LOG(ERR, USER1,
1192                         "Compress xform could not be created\n");
1193                 ret = TEST_FAILED;
1194                 goto exit;
1195         }
1196
1197         memcpy(compress_xform, ts_params->def_comp_xform,
1198                         sizeof(struct rte_comp_xform));
1199         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1200
1201         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1202                 test_buffer = compress_test_bufs[i];
1203
1204                 /* Compress with compressdev, decompress with Zlib */
1205                 if (test_deflate_comp_decomp(&test_buffer, 1,
1206                                 &i,
1207                                 &compress_xform,
1208                                 &ts_params->def_decomp_xform,
1209                                 1,
1210                                 RTE_COMP_OP_STATELESS,
1211                                 0,
1212                                 ZLIB_DECOMPRESS) < 0) {
1213                         ret = TEST_FAILED;
1214                         goto exit;
1215                 }
1216
1217                 /* Compress with Zlib, decompress with compressdev */
1218                 if (test_deflate_comp_decomp(&test_buffer, 1,
1219                                 &i,
1220                                 &compress_xform,
1221                                 &ts_params->def_decomp_xform,
1222                                 1,
1223                                 RTE_COMP_OP_STATELESS,
1224                                 0,
1225                                 ZLIB_COMPRESS) < 0) {
1226                         ret = TEST_FAILED;
1227                         goto exit;
1228                 }
1229         }
1230
1231         ret = TEST_SUCCESS;
1232
1233 exit:
1234         rte_free(compress_xform);
1235         return ret;
1236 }
1237
1238 static int
1239 test_compressdev_deflate_stateless_multi_op(void)
1240 {
1241         struct comp_testsuite_params *ts_params = &testsuite_params;
1242         uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1243         uint16_t buf_idx[num_bufs];
1244         uint16_t i;
1245
1246         for (i = 0; i < num_bufs; i++)
1247                 buf_idx[i] = i;
1248
1249         /* Compress with compressdev, decompress with Zlib */
1250         if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1251                         buf_idx,
1252                         &ts_params->def_comp_xform,
1253                         &ts_params->def_decomp_xform,
1254                         1,
1255                         RTE_COMP_OP_STATELESS,
1256                         0,
1257                         ZLIB_DECOMPRESS) < 0)
1258                 return TEST_FAILED;
1259
1260         /* Compress with Zlib, decompress with compressdev */
1261         if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1262                         buf_idx,
1263                         &ts_params->def_comp_xform,
1264                         &ts_params->def_decomp_xform,
1265                         1,
1266                         RTE_COMP_OP_STATELESS,
1267                         0,
1268                         ZLIB_COMPRESS) < 0)
1269                 return TEST_FAILED;
1270
1271         return TEST_SUCCESS;
1272 }
1273
1274 static int
1275 test_compressdev_deflate_stateless_multi_level(void)
1276 {
1277         struct comp_testsuite_params *ts_params = &testsuite_params;
1278         const char *test_buffer;
1279         unsigned int level;
1280         uint16_t i;
1281         int ret;
1282         struct rte_comp_xform *compress_xform =
1283                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1284
1285         if (compress_xform == NULL) {
1286                 RTE_LOG(ERR, USER1,
1287                         "Compress xform could not be created\n");
1288                 ret = TEST_FAILED;
1289                 goto exit;
1290         }
1291
1292         memcpy(compress_xform, ts_params->def_comp_xform,
1293                         sizeof(struct rte_comp_xform));
1294
1295         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1296                 test_buffer = compress_test_bufs[i];
1297                 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1298                                 level++) {
1299                         compress_xform->compress.level = level;
1300                         /* Compress with compressdev, decompress with Zlib */
1301                         if (test_deflate_comp_decomp(&test_buffer, 1,
1302                                         &i,
1303                                         &compress_xform,
1304                                         &ts_params->def_decomp_xform,
1305                                         1,
1306                                         RTE_COMP_OP_STATELESS,
1307                                         0,
1308                                         ZLIB_DECOMPRESS) < 0) {
1309                                 ret = TEST_FAILED;
1310                                 goto exit;
1311                         }
1312                 }
1313         }
1314
1315         ret = TEST_SUCCESS;
1316
1317 exit:
1318         rte_free(compress_xform);
1319         return ret;
1320 }
1321
1322 #define NUM_XFORMS 3
1323 static int
1324 test_compressdev_deflate_stateless_multi_xform(void)
1325 {
1326         struct comp_testsuite_params *ts_params = &testsuite_params;
1327         uint16_t num_bufs = NUM_XFORMS;
1328         struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1329         struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1330         const char *test_buffers[NUM_XFORMS];
1331         uint16_t i;
1332         unsigned int level = RTE_COMP_LEVEL_MIN;
1333         uint16_t buf_idx[num_bufs];
1334
1335         int ret;
1336
1337         /* Create multiple xforms with various levels */
1338         for (i = 0; i < NUM_XFORMS; i++) {
1339                 compress_xforms[i] = rte_malloc(NULL,
1340                                 sizeof(struct rte_comp_xform), 0);
1341                 if (compress_xforms[i] == NULL) {
1342                         RTE_LOG(ERR, USER1,
1343                                 "Compress xform could not be created\n");
1344                         ret = TEST_FAILED;
1345                         goto exit;
1346                 }
1347
1348                 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1349                                 sizeof(struct rte_comp_xform));
1350                 compress_xforms[i]->compress.level = level;
1351                 level++;
1352
1353                 decompress_xforms[i] = rte_malloc(NULL,
1354                                 sizeof(struct rte_comp_xform), 0);
1355                 if (decompress_xforms[i] == NULL) {
1356                         RTE_LOG(ERR, USER1,
1357                                 "Decompress xform could not be created\n");
1358                         ret = TEST_FAILED;
1359                         goto exit;
1360                 }
1361
1362                 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1363                                 sizeof(struct rte_comp_xform));
1364         }
1365
1366         for (i = 0; i < NUM_XFORMS; i++) {
1367                 buf_idx[i] = 0;
1368                 /* Use the same buffer in all sessions */
1369                 test_buffers[i] = compress_test_bufs[0];
1370         }
1371         /* Compress with compressdev, decompress with Zlib */
1372         if (test_deflate_comp_decomp(test_buffers, num_bufs,
1373                         buf_idx,
1374                         compress_xforms,
1375                         decompress_xforms,
1376                         NUM_XFORMS,
1377                         RTE_COMP_OP_STATELESS,
1378                         0,
1379                         ZLIB_DECOMPRESS) < 0) {
1380                 ret = TEST_FAILED;
1381                 goto exit;
1382         }
1383
1384         ret = TEST_SUCCESS;
1385 exit:
1386         for (i = 0; i < NUM_XFORMS; i++) {
1387                 rte_free(compress_xforms[i]);
1388                 rte_free(decompress_xforms[i]);
1389         }
1390
1391         return ret;
1392 }
1393
1394 static int
1395 test_compressdev_deflate_stateless_sgl(void)
1396 {
1397         struct comp_testsuite_params *ts_params = &testsuite_params;
1398         uint16_t i;
1399         const char *test_buffer;
1400         const struct rte_compressdev_capabilities *capab;
1401
1402         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1403         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1404
1405         if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1406                 return -ENOTSUP;
1407
1408         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1409                 test_buffer = compress_test_bufs[i];
1410                 /* Compress with compressdev, decompress with Zlib */
1411                 if (test_deflate_comp_decomp(&test_buffer, 1,
1412                                 &i,
1413                                 &ts_params->def_comp_xform,
1414                                 &ts_params->def_decomp_xform,
1415                                 1,
1416                                 RTE_COMP_OP_STATELESS,
1417                                 1,
1418                                 ZLIB_DECOMPRESS) < 0)
1419                         return TEST_FAILED;
1420
1421                 /* Compress with Zlib, decompress with compressdev */
1422                 if (test_deflate_comp_decomp(&test_buffer, 1,
1423                                 &i,
1424                                 &ts_params->def_comp_xform,
1425                                 &ts_params->def_decomp_xform,
1426                                 1,
1427                                 RTE_COMP_OP_STATELESS,
1428                                 1,
1429                                 ZLIB_COMPRESS) < 0)
1430                         return TEST_FAILED;
1431         }
1432
1433         return TEST_SUCCESS;
1434 }
1435
1436 static struct unit_test_suite compressdev_testsuite  = {
1437         .suite_name = "compressdev unit test suite",
1438         .setup = testsuite_setup,
1439         .teardown = testsuite_teardown,
1440         .unit_test_cases = {
1441                 TEST_CASE_ST(NULL, NULL,
1442                         test_compressdev_invalid_configuration),
1443                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1444                         test_compressdev_deflate_stateless_fixed),
1445                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1446                         test_compressdev_deflate_stateless_dynamic),
1447                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1448                         test_compressdev_deflate_stateless_multi_op),
1449                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1450                         test_compressdev_deflate_stateless_multi_level),
1451                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1452                         test_compressdev_deflate_stateless_multi_xform),
1453                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1454                         test_compressdev_deflate_stateless_sgl),
1455                 TEST_CASES_END() /**< NULL terminate unit test array */
1456         }
1457 };
1458
1459 static int
1460 test_compressdev(void)
1461 {
1462         return unit_test_suite_runner(&compressdev_testsuite);
1463 }
1464
1465 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);