dpdk.git: test/test/test_compressdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
11 #include <rte_mbuf.h>
12 #include <rte_compressdev.h>
13 #include <rte_string_fns.h>
14
15 #include "test_compressdev_test_buffer.h"
16 #include "test.h"
17
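/* Integer ceiling division (round the quotient up) without floating point */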
18 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
19
20 #define DEFAULT_WINDOW_SIZE 15
21 #define DEFAULT_MEM_LEVEL 8
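/* Dequeue retry limit and wait time between retries (in microseconds) */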
22 #define MAX_DEQD_RETRIES 10
23 #define DEQUEUE_WAIT_TIME 10000
24
25 /*
26  * Allocate 30% extra space for compressed data compared to the original,
27  * in case the data cannot be reduced and actually grows
28  * due to the compression block headers
29  */
30 #define COMPRESS_BUF_SIZE_RATIO 1.3
31 #define NUM_LARGE_MBUFS 16
32 #define SMALL_SEG_SIZE 256
33 #define MAX_SEGS 16
34 #define NUM_OPS 16
35 #define NUM_MAX_XFORMS 16
36 #define NUM_MAX_INFLIGHT_OPS 128
37 #define CACHE_SIZE 0
38
39 const char *
40 huffman_type_strings[] = {
41         [RTE_COMP_HUFFMAN_DEFAULT]      = "PMD default",
42         [RTE_COMP_HUFFMAN_FIXED]        = "Fixed",
43         [RTE_COMP_HUFFMAN_DYNAMIC]      = "Dynamic"
44 };
45
46 enum zlib_direction {
47         ZLIB_NONE,
48         ZLIB_COMPRESS,
49         ZLIB_DECOMPRESS,
50         ZLIB_ALL
51 };
52
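/* Per-op private data: original operation index, since dequeue order is not guaranteed */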
53 struct priv_op_data {
54         uint16_t orig_idx;
55 };
56
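/* Resources shared by all tests in the suite, created in testsuite_setup() */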
57 struct comp_testsuite_params {
58         struct rte_mempool *large_mbuf_pool;
59         struct rte_mempool *small_mbuf_pool;
60         struct rte_mempool *op_pool;
61         struct rte_comp_xform *def_comp_xform;
62         struct rte_comp_xform *def_decomp_xform;
63 };
64
65 static struct comp_testsuite_params testsuite_params = { 0 };
66
67 static void
68 testsuite_teardown(void)
69 {
70         struct comp_testsuite_params *ts_params = &testsuite_params;
71
72         if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
73                 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
74         if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
75                 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
76         if (rte_mempool_in_use_count(ts_params->op_pool))
77                 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
78
79         rte_mempool_free(ts_params->large_mbuf_pool);
80         rte_mempool_free(ts_params->small_mbuf_pool);
81         rte_mempool_free(ts_params->op_pool);
82         rte_free(ts_params->def_comp_xform);
83         rte_free(ts_params->def_decomp_xform);
84 }
85
86 static int
87 testsuite_setup(void)
88 {
89         struct comp_testsuite_params *ts_params = &testsuite_params;
90         uint32_t max_buf_size = 0;
91         unsigned int i;
92
93         if (rte_compressdev_count() == 0) {
94                 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
95                 return TEST_FAILED;
96         }
97
98         RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
99                                 rte_compressdev_name_get(0));
100
101         for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
102                 max_buf_size = RTE_MAX(max_buf_size,
103                                 strlen(compress_test_bufs[i]) + 1);
104
105         /*
106          * Buffers to be used in compression and decompression.
107          * Since compressed data might be larger than the original data
108          * (due to block headers), the buffers should be
109          * big enough for both cases.
110          */
111         max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
112         ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
113                         NUM_LARGE_MBUFS,
114                         CACHE_SIZE, 0,
115                         max_buf_size + RTE_PKTMBUF_HEADROOM,
116                         rte_socket_id());
117         if (ts_params->large_mbuf_pool == NULL) {
118                 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
119                 return TEST_FAILED;
120         }
121
122         /* Create mempool with smaller buffers for SGL testing */
123         ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
124                         NUM_LARGE_MBUFS * MAX_SEGS,
125                         CACHE_SIZE, 0,
126                         SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
127                         rte_socket_id());
128         if (ts_params->small_mbuf_pool == NULL) {
129                 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
130                 goto exit;
131         }
132
133         ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
134                                 0, sizeof(struct priv_op_data),
135                                 rte_socket_id());
136         if (ts_params->op_pool == NULL) {
137                 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
138                 goto exit;
139         }
140
141         ts_params->def_comp_xform =
142                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
143         if (ts_params->def_comp_xform == NULL) {
144                 RTE_LOG(ERR, USER1,
145                         "Default compress xform could not be created\n");
146                 goto exit;
147         }
148         ts_params->def_decomp_xform =
149                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
150         if (ts_params->def_decomp_xform == NULL) {
151                 RTE_LOG(ERR, USER1,
152                         "Default decompress xform could not be created\n");
153                 goto exit;
154         }
155
156         /* Initializes default values for compress/decompress xforms */
157         ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
158         ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
159         ts_params->def_comp_xform->compress.deflate.huffman =
160                                                 RTE_COMP_HUFFMAN_DEFAULT;
161         ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
162         ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
163         ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
164
165         ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
166         ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
167         ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
168         ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
169
170         return TEST_SUCCESS;
171
172 exit:
173         testsuite_teardown();
174
175         return TEST_FAILED;
176 }
177
178 static int
179 generic_ut_setup(void)
180 {
181         /* Configure compressdev (one device, one queue pair) */
182         struct rte_compressdev_config config = {
183                 .socket_id = rte_socket_id(),
184                 .nb_queue_pairs = 1,
185                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
186                 .max_nb_streams = 0
187         };
188
189         if (rte_compressdev_configure(0, &config) < 0) {
190                 RTE_LOG(ERR, USER1, "Device configuration failed\n");
191                 return -1;
192         }
193
194         if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
195                         rte_socket_id()) < 0) {
196                 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
197                 return -1;
198         }
199
200         if (rte_compressdev_start(0) < 0) {
201                 RTE_LOG(ERR, USER1, "Device could not be started\n");
202                 return -1;
203         }
204
205         return 0;
206 }
207
208 static void
209 generic_ut_teardown(void)
210 {
211         rte_compressdev_stop(0);
212         if (rte_compressdev_close(0) < 0)
213                 RTE_LOG(ERR, USER1, "Device could not be closed\n");
214 }
215
216 static int
217 test_compressdev_invalid_configuration(void)
218 {
219         struct rte_compressdev_config invalid_config;
220         struct rte_compressdev_config valid_config = {
221                 .socket_id = rte_socket_id(),
222                 .nb_queue_pairs = 1,
223                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
224                 .max_nb_streams = 0
225         };
226         struct rte_compressdev_info dev_info;
227
228         /* Invalid configuration with 0 queue pairs */
229         memcpy(&invalid_config, &valid_config,
230                         sizeof(struct rte_compressdev_config));
231         invalid_config.nb_queue_pairs = 0;
232
233         TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
234                         "Device configuration was successful "
235                         "with no queue pairs (invalid)\n");
236
237         /*
238          * Invalid configuration with too many queue pairs
239          * (if there is an actual maximum number of queue pairs)
240          */
241         rte_compressdev_info_get(0, &dev_info);
242         if (dev_info.max_nb_queue_pairs != 0) {
243                 memcpy(&invalid_config, &valid_config,
244                         sizeof(struct rte_compressdev_config));
245                 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
246
247                 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
248                                 "Device configuration was successful "
249                                 "with too many queue pairs (invalid)\n");
250         }
251
252         /* Invalid queue pair setup, when no queue pairs have been configured */
253         TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
254                                 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
255                         "Queue pair setup was successful "
256                         "with no queue pairs set (invalid)\n");
257
258         return TEST_SUCCESS;
259 }
260
261 static int
262 compare_buffers(const char *buffer1, uint32_t buffer1_len,
263                 const char *buffer2, uint32_t buffer2_len)
264 {
265         if (buffer1_len != buffer2_len) {
266                 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
267                 return -1;
268         }
269
270         if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
271                 RTE_LOG(ERR, USER1, "Buffers are different\n");
272                 return -1;
273         }
274
275         return 0;
276 }
277
278 /*
279  * Maps compressdev and Zlib flush flags
280  */
281 static int
282 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
283 {
284         switch (flag) {
285         case RTE_COMP_FLUSH_NONE:
286                 return Z_NO_FLUSH;
287         case RTE_COMP_FLUSH_SYNC:
288                 return Z_SYNC_FLUSH;
289         case RTE_COMP_FLUSH_FULL:
290                 return Z_FULL_FLUSH;
291         case RTE_COMP_FLUSH_FINAL:
292                 return Z_FINISH;
293         /*
294          * There should be only the values above,
295          * so this should never happen
296          */
297         default:
298                 return -1;
299         }
300 }
301
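/* Compress op->m_src into op->m_dst using the zlib library directly (reference path, no PMD) */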
302 static int
303 compress_zlib(struct rte_comp_op *op,
304                 const struct rte_comp_xform *xform, int mem_level)
305 {
306         z_stream stream;
307         int zlib_flush;
308         int strategy, window_bits, comp_level;
309         int ret = TEST_FAILED;
310         uint8_t *single_src_buf = NULL;
311         uint8_t *single_dst_buf = NULL;
312
313         /* initialize zlib stream */
314         stream.zalloc = Z_NULL;
315         stream.zfree = Z_NULL;
316         stream.opaque = Z_NULL;
317
318         if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
319                 strategy = Z_FIXED;
320         else
321                 strategy = Z_DEFAULT_STRATEGY;
322
323         /*
324          * Window bits is the base two logarithm of the window size (in bytes).
325          * A negative value tells zlib to produce raw DEFLATE (no zlib header/trailer).
326          */
327         window_bits = -(xform->compress.window_size);
328
329         comp_level = xform->compress.level;
330
331         if (comp_level != RTE_COMP_LEVEL_NONE)
332                 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
333                         window_bits, mem_level, strategy);
334         else
335                 ret = deflateInit(&stream, Z_NO_COMPRESSION);
336
337         if (ret != Z_OK) {
338                 printf("Zlib deflate could not be initialized\n");
339                 goto exit;
340         }
341
342         /* Assuming stateless operation */
343         /* SGL */
344         if (op->m_src->nb_segs > 1) {
345                 single_src_buf = rte_malloc(NULL,
346                                 rte_pktmbuf_pkt_len(op->m_src), 0);
347                 if (single_src_buf == NULL) {
348                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
349                         goto exit;
350                 }
351                 single_dst_buf = rte_malloc(NULL,
352                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
353                 if (single_dst_buf == NULL) {
354                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
355                         goto exit;
356                 }
357                 if (rte_pktmbuf_read(op->m_src, 0,
358                                         rte_pktmbuf_pkt_len(op->m_src),
359                                         single_src_buf) == NULL) {
360                         RTE_LOG(ERR, USER1,
361                                 "Buffer could not be read entirely\n");
362                         goto exit;
363                 }
364
365                 stream.avail_in = op->src.length;
366                 stream.next_in = single_src_buf;
367                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
368                 stream.next_out = single_dst_buf;
369
370         } else {
371                 stream.avail_in = op->src.length;
372                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
373                 stream.avail_out = op->m_dst->data_len;
374                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
375         }
376         /* Stateless operation, the whole buffer is compressed in one go */
377         zlib_flush = map_zlib_flush_flag(op->flush_flag);
378         ret = deflate(&stream, zlib_flush);
379
380         if (stream.avail_in != 0) {
381                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
382                 goto exit;
383         }
384
385         if (ret != Z_STREAM_END)
386                 goto exit;
387
388         /* Copy data to destination SGL */
389         if (op->m_src->nb_segs > 1) {
390                 uint32_t remaining_data = stream.total_out;
391                 uint8_t *src_data = single_dst_buf;
392                 struct rte_mbuf *dst_buf = op->m_dst;
393
394                 while (remaining_data > 0) {
395                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
396                                         uint8_t *);
397                         /* Last segment */
398                         if (remaining_data < dst_buf->data_len) {
399                                 memcpy(dst_data, src_data, remaining_data);
400                                 remaining_data = 0;
401                         } else {
402                                 memcpy(dst_data, src_data, dst_buf->data_len);
403                                 remaining_data -= dst_buf->data_len;
404                                 src_data += dst_buf->data_len;
405                                 dst_buf = dst_buf->next;
406                         }
407                 }
408         }
409
410         op->consumed = stream.total_in;
411         op->produced = stream.total_out;
412         op->status = RTE_COMP_OP_STATUS_SUCCESS;
413
414         deflateReset(&stream);
415
416         ret = 0;
417 exit:
418         deflateEnd(&stream);
419         rte_free(single_src_buf);
420         rte_free(single_dst_buf);
421
422         return ret;
423 }
424
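/* Decompress op->m_src into op->m_dst using the zlib library directly (reference path, no PMD) */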
425 static int
426 decompress_zlib(struct rte_comp_op *op,
427                 const struct rte_comp_xform *xform)
428 {
429         z_stream stream;
430         int window_bits;
431         int zlib_flush;
432         int ret = TEST_FAILED;
433         uint8_t *single_src_buf = NULL;
434         uint8_t *single_dst_buf = NULL;
435
436         /* initialize zlib stream */
437         stream.zalloc = Z_NULL;
438         stream.zfree = Z_NULL;
439         stream.opaque = Z_NULL;
440
441         /*
442          * Window bits is the base two logarithm of the window size (in bytes).
443          * A negative value tells zlib to expect raw DEFLATE (no zlib header/trailer).
444          */
445         window_bits = -(xform->decompress.window_size);
446
447         ret = inflateInit2(&stream, window_bits);
448
449         if (ret != Z_OK) {
450                 printf("Zlib inflate could not be initialized\n");
451                 goto exit;
452         }
453
454         /* Assuming stateless operation */
455         /* SGL */
456         if (op->m_src->nb_segs > 1) {
457                 single_src_buf = rte_malloc(NULL,
458                                 rte_pktmbuf_pkt_len(op->m_src), 0);
459                 if (single_src_buf == NULL) {
460                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
461                         goto exit;
462                 }
463                 single_dst_buf = rte_malloc(NULL,
464                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
465                 if (single_dst_buf == NULL) {
466                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
467                         goto exit;
468                 }
469                 if (rte_pktmbuf_read(op->m_src, 0,
470                                         rte_pktmbuf_pkt_len(op->m_src),
471                                         single_src_buf) == NULL) {
472                         RTE_LOG(ERR, USER1,
473                                 "Buffer could not be read entirely\n");
474                         goto exit;
475                 }
476
477                 stream.avail_in = op->src.length;
478                 stream.next_in = single_src_buf;
479                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
480                 stream.next_out = single_dst_buf;
481
482         } else {
483                 stream.avail_in = op->src.length;
484                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
485                 stream.avail_out = op->m_dst->data_len;
486                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
487         }
488
489         /* Stateless operation, the whole buffer is decompressed in one go */
490         zlib_flush = map_zlib_flush_flag(op->flush_flag);
491         ret = inflate(&stream, zlib_flush);
492
493         if (stream.avail_in != 0) {
494                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
495                 goto exit;
496         }
497
498         if (ret != Z_STREAM_END)
499                 goto exit;
500
501         if (op->m_src->nb_segs > 1) {
502                 uint32_t remaining_data = stream.total_out;
503                 uint8_t *src_data = single_dst_buf;
504                 struct rte_mbuf *dst_buf = op->m_dst;
505
506                 while (remaining_data > 0) {
507                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
508                                         uint8_t *);
509                         /* Last segment */
510                         if (remaining_data < dst_buf->data_len) {
511                                 memcpy(dst_data, src_data, remaining_data);
512                                 remaining_data = 0;
513                         } else {
514                                 memcpy(dst_data, src_data, dst_buf->data_len);
515                                 remaining_data -= dst_buf->data_len;
516                                 src_data += dst_buf->data_len;
517                                 dst_buf = dst_buf->next;
518                         }
519                 }
520         }
521
522         op->consumed = stream.total_in;
523         op->produced = stream.total_out;
524         op->status = RTE_COMP_OP_STATUS_SUCCESS;
525
526         inflateReset(&stream);
527
528         ret = 0;
529 exit:
530         inflateEnd(&stream);
531
532         return ret;
533 }
534
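/*
 * Build a multi-segment (SGL) mbuf of total_data_size bytes on head_buf,
 * copying data from test_buf when it is provided
 */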
535 static int
536 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
537                 uint32_t total_data_size,
538                 struct rte_mempool *small_mbuf_pool,
539                 struct rte_mempool *large_mbuf_pool,
540                 uint8_t limit_segs_in_sgl)
541 {
542         uint32_t remaining_data = total_data_size;
543         uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
544         struct rte_mempool *pool;
545         struct rte_mbuf *next_seg;
546         uint32_t data_size;
547         char *buf_ptr;
548         const char *data_ptr = test_buf;
549         uint16_t i;
550         int ret;
551
552         if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
553                 num_remaining_segs = limit_segs_in_sgl - 1;
554
555         /*
556          * Allocate data in the first segment (header) and
557          * copy data if test buffer is provided
558          */
559         if (remaining_data < SMALL_SEG_SIZE)
560                 data_size = remaining_data;
561         else
562                 data_size = SMALL_SEG_SIZE;
563         buf_ptr = rte_pktmbuf_append(head_buf, data_size);
564         if (buf_ptr == NULL) {
565                 RTE_LOG(ERR, USER1,
566                         "Not enough space in the 1st buffer\n");
567                 return -1;
568         }
569
570         if (data_ptr != NULL) {
571                 /* Copy characters without NULL terminator */
572                 strncpy(buf_ptr, data_ptr, data_size);
573                 data_ptr += data_size;
574         }
575         remaining_data -= data_size;
576         num_remaining_segs--;
577
578         /*
579          * Allocate the rest of the segments,
580          * copy the rest of the data and chain the segments.
581          */
582         for (i = 0; i < num_remaining_segs; i++) {
583
584                 if (i == (num_remaining_segs - 1)) {
585                         /* last segment */
586                         if (remaining_data > SMALL_SEG_SIZE)
587                                 pool = large_mbuf_pool;
588                         else
589                                 pool = small_mbuf_pool;
590                         data_size = remaining_data;
591                 } else {
592                         data_size = SMALL_SEG_SIZE;
593                         pool = small_mbuf_pool;
594                 }
595
596                 next_seg = rte_pktmbuf_alloc(pool);
597                 if (next_seg == NULL) {
598                         RTE_LOG(ERR, USER1,
599                                 "New segment could not be allocated "
600                                 "from the mempool\n");
601                         return -1;
602                 }
603                 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
604                 if (buf_ptr == NULL) {
605                         RTE_LOG(ERR, USER1,
606                                 "Not enough space in the buffer\n");
607                         rte_pktmbuf_free(next_seg);
608                         return -1;
609                 }
610                 if (data_ptr != NULL) {
611                         /* Copy characters without NULL terminator */
612                         strncpy(buf_ptr, data_ptr, data_size);
613                         data_ptr += data_size;
614                 }
615                 remaining_data -= data_size;
616
617                 ret = rte_pktmbuf_chain(head_buf, next_seg);
618                 if (ret != 0) {
619                         rte_pktmbuf_free(next_seg);
620                         RTE_LOG(ERR, USER1,
621                                 "Segment could not be chained\n");
622                         return -1;
623                 }
624         }
625
626         return 0;
627 }
628
629 /*
630  * Compresses and decompresses buffer with compressdev API and Zlib API
631  */
632 static int
633 test_deflate_comp_decomp(const char * const test_bufs[],
634                 unsigned int num_bufs,
635                 uint16_t buf_idx[],
636                 struct rte_comp_xform *compress_xforms[],
637                 struct rte_comp_xform *decompress_xforms[],
638                 unsigned int num_xforms,
639                 enum rte_comp_op_type state,
640                 unsigned int sgl,
641                 enum zlib_direction zlib_dir)
642 {
643         struct comp_testsuite_params *ts_params = &testsuite_params;
644         int ret_status = -1;
645         int ret;
646         struct rte_mbuf *uncomp_bufs[num_bufs];
647         struct rte_mbuf *comp_bufs[num_bufs];
648         struct rte_comp_op *ops[num_bufs];
649         struct rte_comp_op *ops_processed[num_bufs];
650         void *priv_xforms[num_bufs];
651         uint16_t num_enqd, num_deqd, num_total_deqd;
652         uint16_t num_priv_xforms = 0;
653         unsigned int deqd_retries = 0;
654         struct priv_op_data *priv_data;
655         char *buf_ptr;
656         unsigned int i;
657         struct rte_mempool *buf_pool;
658         uint32_t data_size;
659         const struct rte_compressdev_capabilities *capa =
660                 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
661         char *contig_buf = NULL;
662
663         /* Initialize all arrays to NULL */
664         memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
665         memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
666         memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
667         memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
668         memset(priv_xforms, 0, sizeof(void *) * num_bufs);
669
670         if (sgl)
671                 buf_pool = ts_params->small_mbuf_pool;
672         else
673                 buf_pool = ts_params->large_mbuf_pool;
674
675         /* Prepare the source mbufs with the data */
676         ret = rte_pktmbuf_alloc_bulk(buf_pool,
677                                 uncomp_bufs, num_bufs);
678         if (ret < 0) {
679                 RTE_LOG(ERR, USER1,
680                         "Source mbufs could not be allocated "
681                         "from the mempool\n");
682                 goto exit;
683         }
684
685         if (sgl) {
686                 for (i = 0; i < num_bufs; i++) {
687                         data_size = strlen(test_bufs[i]) + 1;
688                         if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
689                                         data_size,
690                                         ts_params->small_mbuf_pool,
691                                         ts_params->large_mbuf_pool,
692                                         MAX_SEGS) < 0)
693                                 goto exit;
694                 }
695         } else {
696                 for (i = 0; i < num_bufs; i++) {
697                         data_size = strlen(test_bufs[i]) + 1;
698                         buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
699                         snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
700                 }
701         }
702
703         /* Prepare the destination mbufs */
704         ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
705         if (ret < 0) {
706                 RTE_LOG(ERR, USER1,
707                         "Destination mbufs could not be allocated "
708                         "from the mempool\n");
709                 goto exit;
710         }
711
712         if (sgl) {
713                 for (i = 0; i < num_bufs; i++) {
714                         data_size = strlen(test_bufs[i]) *
715                                 COMPRESS_BUF_SIZE_RATIO;
716                         if (prepare_sgl_bufs(NULL, comp_bufs[i],
717                                         data_size,
718                                         ts_params->small_mbuf_pool,
719                                         ts_params->large_mbuf_pool,
720                                         MAX_SEGS) < 0)
721                                 goto exit;
722                 }
723
724         } else {
725                 for (i = 0; i < num_bufs; i++) {
726                         data_size = strlen(test_bufs[i]) *
727                                 COMPRESS_BUF_SIZE_RATIO;
728                         rte_pktmbuf_append(comp_bufs[i], data_size);
729                 }
730         }
731
732         /* Build the compression operations */
733         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
734         if (ret < 0) {
735                 RTE_LOG(ERR, USER1,
736                         "Compress operations could not be allocated "
737                         "from the mempool\n");
738                 goto exit;
739         }
740
741
742         for (i = 0; i < num_bufs; i++) {
743                 ops[i]->m_src = uncomp_bufs[i];
744                 ops[i]->m_dst = comp_bufs[i];
745                 ops[i]->src.offset = 0;
746                 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
747                 ops[i]->dst.offset = 0;
748                 if (state == RTE_COMP_OP_STATELESS) {
749                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
750                 } else {
751                         RTE_LOG(ERR, USER1,
752                                 "Stateful operations are not supported "
753                                 "in these tests yet\n");
754                         goto exit;
755                 }
756                 ops[i]->input_chksum = 0;
757                 /*
758                  * Store the original operation index in the private data,
759                  * since ordering is not guaranteed when dequeueing from
760                  * compressdev, so results can be matched against
761                  * the original buffers at the end of the test.
762                  */
763                 priv_data = (struct priv_op_data *) (ops[i] + 1);
764                 priv_data->orig_idx = i;
765         }
766
767         /* Compress data (either with Zlib API or compressdev API) */
768         if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
769                 for (i = 0; i < num_bufs; i++) {
770                         const struct rte_comp_xform *compress_xform =
771                                 compress_xforms[i % num_xforms];
772                         ret = compress_zlib(ops[i], compress_xform,
773                                         DEFAULT_MEM_LEVEL);
774                         if (ret < 0)
775                                 goto exit;
776
777                         ops_processed[i] = ops[i];
778                 }
779         } else {
780                 /* Create compress private xform data */
781                 for (i = 0; i < num_xforms; i++) {
782                         ret = rte_compressdev_private_xform_create(0,
783                                 (const struct rte_comp_xform *)compress_xforms[i],
784                                 &priv_xforms[i]);
785                         if (ret < 0) {
786                                 RTE_LOG(ERR, USER1,
787                                         "Compression private xform "
788                                         "could not be created\n");
789                                 goto exit;
790                         }
791                         num_priv_xforms++;
792                 }
793
794                 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
795                         /* Attach shareable private xform data to ops */
796                         for (i = 0; i < num_bufs; i++)
797                                 ops[i]->private_xform = priv_xforms[i % num_xforms];
798                 } else {
799                         /* Create rest of the private xforms for the other ops */
800                         for (i = num_xforms; i < num_bufs; i++) {
801                                 ret = rte_compressdev_private_xform_create(0,
802                                         compress_xforms[i % num_xforms],
803                                         &priv_xforms[i]);
804                                 if (ret < 0) {
805                                         RTE_LOG(ERR, USER1,
806                                                 "Compression private xform "
807                                                 "could not be created\n");
808                                         goto exit;
809                                 }
810                                 num_priv_xforms++;
811                         }
812
813                         /* Attach non shareable private xform data to ops */
814                         for (i = 0; i < num_bufs; i++)
815                                 ops[i]->private_xform = priv_xforms[i];
816                 }
817
818                 /* Enqueue and dequeue all operations */
819                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
820                 if (num_enqd < num_bufs) {
821                         RTE_LOG(ERR, USER1,
822                                 "The operations could not be enqueued\n");
823                         goto exit;
824                 }
825
826                 num_total_deqd = 0;
827                 do {
828                         /*
829                          * If retrying a dequeue call, wait for 10 ms to give
830                          * the driver enough time to process the operations
831                          */
832                         if (deqd_retries != 0) {
833                                 /*
834                                  * Avoid infinite loop if not all the
835                                  * operations get out of the device
836                                  */
837                                 if (deqd_retries == MAX_DEQD_RETRIES) {
838                                         RTE_LOG(ERR, USER1,
839                                                 "Not all operations could be "
840                                                 "dequeued\n");
841                                         goto exit;
842                                 }
843                                 usleep(DEQUEUE_WAIT_TIME);
844                         }
845                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
846                                         &ops_processed[num_total_deqd], num_bufs);
847                         num_total_deqd += num_deqd;
848                         deqd_retries++;
849                 } while (num_total_deqd < num_enqd);
850
851                 deqd_retries = 0;
852
853                 /* Free compress private xforms */
854                 for (i = 0; i < num_priv_xforms; i++) {
855                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
856                         priv_xforms[i] = NULL;
857                 }
858                 num_priv_xforms = 0;
859         }
860
861         for (i = 0; i < num_bufs; i++) {
862                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
863                 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
864                 const struct rte_comp_compress_xform *compress_xform =
865                                 &compress_xforms[xform_idx]->compress;
866                 enum rte_comp_huffman huffman_type =
867                         compress_xform->deflate.huffman;
868                 char engine[] = "zlib (directly, not PMD)";
869                 if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
870                         strlcpy(engine, "PMD", sizeof(engine));
871
872                 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
873                         " %u bytes (level = %d, huffman = %s)\n",
874                         buf_idx[priv_data->orig_idx], engine,
875                         ops_processed[i]->consumed, ops_processed[i]->produced,
876                         compress_xform->level,
877                         huffman_type_strings[huffman_type]);
878                 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
879                         ops_processed[i]->consumed == 0 ? 0 :
880                         (float)ops_processed[i]->produced /
881                         ops_processed[i]->consumed * 100);
882                 ops[i] = NULL;
883         }
884
885         /*
886          * Check operation status and free source mbufs (destination mbuf and
887          * compress operation information is needed for the decompression stage)
888          */
889         for (i = 0; i < num_bufs; i++) {
890                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
891                         RTE_LOG(ERR, USER1,
892                                 "Some operations were not successful\n");
893                         goto exit;
894                 }
895                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
896                 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
897                 uncomp_bufs[priv_data->orig_idx] = NULL;
898         }
899
900         /* Allocate buffers for decompressed data */
901         ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
902         if (ret < 0) {
903                 RTE_LOG(ERR, USER1,
904                         "Destination mbufs could not be allocated "
905                         "from the mempool\n");
906                 goto exit;
907         }
908
909         if (sgl) {
910                 for (i = 0; i < num_bufs; i++) {
911                         priv_data = (struct priv_op_data *)
912                                         (ops_processed[i] + 1);
913                         data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
914                         if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
915                                         data_size,
916                                         ts_params->small_mbuf_pool,
917                                         ts_params->large_mbuf_pool,
918                                         MAX_SEGS) < 0)
919                                 goto exit;
920                 }
921
922         } else {
923                 for (i = 0; i < num_bufs; i++) {
924                         priv_data = (struct priv_op_data *)
925                                         (ops_processed[i] + 1);
926                         data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
927                         rte_pktmbuf_append(uncomp_bufs[i], data_size);
928                 }
929         }
930
931         /* Build the decompression operations */
932         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
933         if (ret < 0) {
934                 RTE_LOG(ERR, USER1,
935                         "Decompress operations could not be allocated "
936                         "from the mempool\n");
937                 goto exit;
938         }
939
940         /* Source buffer is the compressed data from the previous operations */
941         for (i = 0; i < num_bufs; i++) {
942                 ops[i]->m_src = ops_processed[i]->m_dst;
943                 ops[i]->m_dst = uncomp_bufs[i];
944                 ops[i]->src.offset = 0;
945                 /*
946                  * Set the length of the compressed data to the
947                  * number of bytes that were produced in the previous stage
948                  */
949                 ops[i]->src.length = ops_processed[i]->produced;
950                 ops[i]->dst.offset = 0;
951                 if (state == RTE_COMP_OP_STATELESS) {
952                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
953                 } else {
954                         RTE_LOG(ERR, USER1,
955                                 "Stateful operations are not supported "
956                                 "in these tests yet\n");
957                         goto exit;
958                 }
959                 ops[i]->input_chksum = 0;
960                 /*
961                  * Copy private data from previous operations,
962                  * to keep track of the original buffer index
963                  */
964                 memcpy(ops[i] + 1, ops_processed[i] + 1,
965                                 sizeof(struct priv_op_data));
966         }
967
968         /*
969          * Free the previous compress operations,
970          * as they are not needed anymore
971          */
972         rte_comp_op_bulk_free(ops_processed, num_bufs);
973
974         /* Decompress data (either with Zlib API or compressdev API) */
975         if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
976                 for (i = 0; i < num_bufs; i++) {
977                         priv_data = (struct priv_op_data *)(ops[i] + 1);
978                         uint16_t xform_idx = priv_data->orig_idx % num_xforms;
979                         const struct rte_comp_xform *decompress_xform =
980                                 decompress_xforms[xform_idx];
981
982                         ret = decompress_zlib(ops[i], decompress_xform);
983                         if (ret < 0)
984                                 goto exit;
985
986                         ops_processed[i] = ops[i];
987                 }
988         } else {
989                 /* Create decompress private xform data */
990                 for (i = 0; i < num_xforms; i++) {
991                         ret = rte_compressdev_private_xform_create(0,
992                                 (const struct rte_comp_xform *)decompress_xforms[i],
993                                 &priv_xforms[i]);
994                         if (ret < 0) {
995                                 RTE_LOG(ERR, USER1,
996                                         "Decompression private xform "
997                                         "could not be created\n");
998                                 goto exit;
999                         }
1000                         num_priv_xforms++;
1001                 }
1002
1003                 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1004                         /* Attach shareable private xform data to ops */
1005                         for (i = 0; i < num_bufs; i++) {
1006                                 priv_data = (struct priv_op_data *)(ops[i] + 1);
1007                                 uint16_t xform_idx = priv_data->orig_idx %
1008                                                                 num_xforms;
1009                                 ops[i]->private_xform = priv_xforms[xform_idx];
1010                         }
1011                 } else {
1012                         /* Create rest of the private xforms for the other ops */
1013                         for (i = num_xforms; i < num_bufs; i++) {
1014                                 ret = rte_compressdev_private_xform_create(0,
1015                                         decompress_xforms[i % num_xforms],
1016                                         &priv_xforms[i]);
1017                                 if (ret < 0) {
1018                                         RTE_LOG(ERR, USER1,
1019                                                 "Decompression private xform "
1020                                                 "could not be created\n");
1021                                         goto exit;
1022                                 }
1023                                 num_priv_xforms++;
1024                         }
1025
1026                         /* Attach non shareable private xform data to ops */
1027                         for (i = 0; i < num_bufs; i++) {
1028                                 priv_data = (struct priv_op_data *)(ops[i] + 1);
1029                                 uint16_t xform_idx = priv_data->orig_idx;
1030                                 ops[i]->private_xform = priv_xforms[xform_idx];
1031                         }
1032                 }
1033
1034                 /* Enqueue and dequeue all operations */
1035                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1036                 if (num_enqd < num_bufs) {
1037                         RTE_LOG(ERR, USER1,
1038                                 "The operations could not be enqueued\n");
1039                         goto exit;
1040                 }
1041
1042                 num_total_deqd = 0;
1043                 do {
1044                         /*
1045                          * If retrying a dequeue call, wait for 10 ms to give
1046                          * the driver enough time to process the operations
1047                          */
1048                         if (deqd_retries != 0) {
1049                                 /*
1050                                  * Avoid infinite loop if not all the
1051                                  * operations get out of the device
1052                                  */
1053                                 if (deqd_retries == MAX_DEQD_RETRIES) {
1054                                         RTE_LOG(ERR, USER1,
1055                                                 "Not all operations could be "
1056                                                 "dequeued\n");
1057                                         goto exit;
1058                                 }
1059                                 usleep(DEQUEUE_WAIT_TIME);
1060                         }
1061                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
1062                                         &ops_processed[num_total_deqd], num_bufs);
1063                         num_total_deqd += num_deqd;
1064                         deqd_retries++;
1065                 } while (num_total_deqd < num_enqd);
1066
1067                 deqd_retries = 0;
1068         }
1069
1070         for (i = 0; i < num_bufs; i++) {
1071                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1072                 char engine[] = "zlib (directly, not PMD)";
1073                 if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1074                         strlcpy(engine, "PMD", sizeof(engine));
1075                 RTE_LOG(DEBUG, USER1,
1076                         "Buffer %u decompressed by %s from %u to %u bytes\n",
1077                         buf_idx[priv_data->orig_idx], engine,
1078                         ops_processed[i]->consumed, ops_processed[i]->produced);
1079                 ops[i] = NULL;
1080         }
1081
1082         /*
1083          * Check operation status and free source mbuf (destination mbuf and
1084          * compress operation information is still needed)
1085          */
1086         for (i = 0; i < num_bufs; i++) {
1087                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1088                         RTE_LOG(ERR, USER1,
1089                                 "Some operations were not successful\n");
1090                         goto exit;
1091                 }
1092                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1093                 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1094                 comp_bufs[priv_data->orig_idx] = NULL;
1095         }
1096
1097         /*
1098          * Compare the original stream with the decompressed stream
1099          * (in size and the data)
1100          */
1101         for (i = 0; i < num_bufs; i++) {
1102                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1103                 const char *buf1 = test_bufs[priv_data->orig_idx];
1104                 const char *buf2;
1105                 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1106                 if (contig_buf == NULL) {
1107                         RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1108                                         "be allocated\n");
1109                         goto exit;
1110                 }
1111
1112                 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1113                                 ops_processed[i]->produced, contig_buf);
1114
1115                 if (compare_buffers(buf1, strlen(buf1) + 1,
1116                                 buf2, ops_processed[i]->produced) < 0)
1117                         goto exit;
1118
1119                 rte_free(contig_buf);
1120                 contig_buf = NULL;
1121         }
1122
1123         ret_status = 0;
1124
1125 exit:
1126         /* Free resources */
1127         for (i = 0; i < num_bufs; i++) {
1128                 rte_pktmbuf_free(uncomp_bufs[i]);
1129                 rte_pktmbuf_free(comp_bufs[i]);
1130                 rte_comp_op_free(ops[i]);
1131                 rte_comp_op_free(ops_processed[i]);
1132         }
1133         for (i = 0; i < num_priv_xforms; i++) {
1134                 if (priv_xforms[i] != NULL)
1135                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
1136         }
1137         rte_free(contig_buf);
1138
1139         return ret_status;
1140 }
1141
1142 static int
1143 test_compressdev_deflate_stateless_fixed(void)
1144 {
1145         struct comp_testsuite_params *ts_params = &testsuite_params;
1146         const char *test_buffer;
1147         uint16_t i;
1148         int ret;
1149         const struct rte_compressdev_capabilities *capab;
1150
1151         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1152         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1153
1154         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1155                 return -ENOTSUP;
1156
1157         struct rte_comp_xform *compress_xform =
1158                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1159
1160         if (compress_xform == NULL) {
1161                 RTE_LOG(ERR, USER1,
1162                         "Compress xform could not be created\n");
1163                 ret = TEST_FAILED;
1164                 goto exit;
1165         }
1166
1167         memcpy(compress_xform, ts_params->def_comp_xform,
1168                         sizeof(struct rte_comp_xform));
1169         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1170
1171         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1172                 test_buffer = compress_test_bufs[i];
1173
1174                 /* Compress with compressdev, decompress with Zlib */
1175                 if (test_deflate_comp_decomp(&test_buffer, 1,
1176                                 &i,
1177                                 &compress_xform,
1178                                 &ts_params->def_decomp_xform,
1179                                 1,
1180                                 RTE_COMP_OP_STATELESS,
1181                                 0,
1182                                 ZLIB_DECOMPRESS) < 0) {
1183                         ret = TEST_FAILED;
1184                         goto exit;
1185                 }
1186
1187                 /* Compress with Zlib, decompress with compressdev */
1188                 if (test_deflate_comp_decomp(&test_buffer, 1,
1189                                 &i,
1190                                 &compress_xform,
1191                                 &ts_params->def_decomp_xform,
1192                                 1,
1193                                 RTE_COMP_OP_STATELESS,
1194                                 0,
1195                                 ZLIB_COMPRESS) < 0) {
1196                         ret = TEST_FAILED;
1197                         goto exit;
1198                 }
1199         }
1200
1201         ret = TEST_SUCCESS;
1202
1203 exit:
1204         rte_free(compress_xform);
1205         return ret;
1206 }
1207
1208 static int
1209 test_compressdev_deflate_stateless_dynamic(void)
1210 {
1211         struct comp_testsuite_params *ts_params = &testsuite_params;
1212         const char *test_buffer;
1213         uint16_t i;
1214         int ret;
1215         struct rte_comp_xform *compress_xform =
1216                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1217
1218         const struct rte_compressdev_capabilities *capab;
1219
1220         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1221         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1222
1223         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1224                 return -ENOTSUP;
1225
1226         if (compress_xform == NULL) {
1227                 RTE_LOG(ERR, USER1,
1228                         "Compress xform could not be created\n");
1229                 ret = TEST_FAILED;
1230                 goto exit;
1231         }
1232
1233         memcpy(compress_xform, ts_params->def_comp_xform,
1234                         sizeof(struct rte_comp_xform));
1235         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1236
1237         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1238                 test_buffer = compress_test_bufs[i];
1239
1240                 /* Compress with compressdev, decompress with Zlib */
1241                 if (test_deflate_comp_decomp(&test_buffer, 1,
1242                                 &i,
1243                                 &compress_xform,
1244                                 &ts_params->def_decomp_xform,
1245                                 1,
1246                                 RTE_COMP_OP_STATELESS,
1247                                 0,
1248                                 ZLIB_DECOMPRESS) < 0) {
1249                         ret = TEST_FAILED;
1250                         goto exit;
1251                 }
1252
1253                 /* Compress with Zlib, decompress with compressdev */
1254                 if (test_deflate_comp_decomp(&test_buffer, 1,
1255                                 &i,
1256                                 &compress_xform,
1257                                 &ts_params->def_decomp_xform,
1258                                 1,
1259                                 RTE_COMP_OP_STATELESS,
1260                                 0,
1261                                 ZLIB_COMPRESS) < 0) {
1262                         ret = TEST_FAILED;
1263                         goto exit;
1264                 }
1265         }
1266
1267         ret = TEST_SUCCESS;
1268
1269 exit:
1270         rte_free(compress_xform);
1271         return ret;
1272 }
1273
1274 static int
1275 test_compressdev_deflate_stateless_multi_op(void)
1276 {
1277         struct comp_testsuite_params *ts_params = &testsuite_params;
1278         uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1279         uint16_t buf_idx[num_bufs];
1280         uint16_t i;
1281
1282         for (i = 0; i < num_bufs; i++)
1283                 buf_idx[i] = i;
1284
1285         /* Compress with compressdev, decompress with Zlib */
1286         if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1287                         buf_idx,
1288                         &ts_params->def_comp_xform,
1289                         &ts_params->def_decomp_xform,
1290                         1,
1291                         RTE_COMP_OP_STATELESS,
1292                         0,
1293                         ZLIB_DECOMPRESS) < 0)
1294                 return TEST_FAILED;
1295
1296         /* Compress with Zlib, decompress with compressdev */
1297         if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1298                         buf_idx,
1299                         &ts_params->def_comp_xform,
1300                         &ts_params->def_decomp_xform,
1301                         1,
1302                         RTE_COMP_OP_STATELESS,
1303                         0,
1304                         ZLIB_COMPRESS) < 0)
1305                 return TEST_FAILED;
1306
1307         return TEST_SUCCESS;
1308 }
1309
1310 static int
1311 test_compressdev_deflate_stateless_multi_level(void)
1312 {
1313         struct comp_testsuite_params *ts_params = &testsuite_params;
1314         const char *test_buffer;
1315         unsigned int level;
1316         uint16_t i;
1317         int ret;
1318         struct rte_comp_xform *compress_xform =
1319                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1320
1321         if (compress_xform == NULL) {
1322                 RTE_LOG(ERR, USER1,
1323                         "Compress xform could not be created\n");
1324                 ret = TEST_FAILED;
1325                 goto exit;
1326         }
1327
1328         memcpy(compress_xform, ts_params->def_comp_xform,
1329                         sizeof(struct rte_comp_xform));
1330
1331         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1332                 test_buffer = compress_test_bufs[i];
1333                 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1334                                 level++) {
1335                         compress_xform->compress.level = level;
1336                         /* Compress with compressdev, decompress with Zlib */
1337                         if (test_deflate_comp_decomp(&test_buffer, 1,
1338                                         &i,
1339                                         &compress_xform,
1340                                         &ts_params->def_decomp_xform,
1341                                         1,
1342                                         RTE_COMP_OP_STATELESS,
1343                                         0,
1344                                         ZLIB_DECOMPRESS) < 0) {
1345                                 ret = TEST_FAILED;
1346                                 goto exit;
1347                         }
1348                 }
1349         }
1350
1351         ret = TEST_SUCCESS;
1352
1353 exit:
1354         rte_free(compress_xform);
1355         return ret;
1356 }
1357
1358 #define NUM_XFORMS 3
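/*
 * Give each op in the burst its own compress/decompress xform pair, each
 * compress xform using a different level, while every op works on the
 * same input buffer.
 */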
1359 static int
1360 test_compressdev_deflate_stateless_multi_xform(void)
1361 {
1362         struct comp_testsuite_params *ts_params = &testsuite_params;
1363         uint16_t num_bufs = NUM_XFORMS;
1364         struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1365         struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1366         const char *test_buffers[NUM_XFORMS];
1367         uint16_t i;
1368         unsigned int level = RTE_COMP_LEVEL_MIN;
1369         uint16_t buf_idx[num_bufs];
1370
1371         int ret;
1372
1373         /* Create multiple xforms with various levels */
1374         for (i = 0; i < NUM_XFORMS; i++) {
1375                 compress_xforms[i] = rte_malloc(NULL,
1376                                 sizeof(struct rte_comp_xform), 0);
1377                 if (compress_xforms[i] == NULL) {
1378                         RTE_LOG(ERR, USER1,
1379                                 "Compress xform could not be created\n");
1380                         ret = TEST_FAILED;
1381                         goto exit;
1382                 }
1383
1384                 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1385                                 sizeof(struct rte_comp_xform));
1386                 compress_xforms[i]->compress.level = level;
1387                 level++;
1388
1389                 decompress_xforms[i] = rte_malloc(NULL,
1390                                 sizeof(struct rte_comp_xform), 0);
1391                 if (decompress_xforms[i] == NULL) {
1392                         RTE_LOG(ERR, USER1,
1393                                 "Decompress xform could not be created\n");
1394                         ret = TEST_FAILED;
1395                         goto exit;
1396                 }
1397
1398                 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1399                                 sizeof(struct rte_comp_xform));
1400         }
1401
1402         for (i = 0; i < NUM_XFORMS; i++) {
1403                 buf_idx[i] = 0;
1404                 /* Use the same buffer with every xform */

1405                 test_buffers[i] = compress_test_bufs[0];
1406         }
1407         /* Compress with compressdev, decompress with Zlib */
1408         if (test_deflate_comp_decomp(test_buffers, num_bufs,
1409                         buf_idx,
1410                         compress_xforms,
1411                         decompress_xforms,
1412                         NUM_XFORMS,
1413                         RTE_COMP_OP_STATELESS,
1414                         0,
1415                         ZLIB_DECOMPRESS) < 0) {
1416                 ret = TEST_FAILED;
1417                 goto exit;
1418         }
1419
1420         ret = TEST_SUCCESS;
1421 exit:
1422         for (i = 0; i < NUM_XFORMS; i++) {
1423                 rte_free(compress_xforms[i]);
1424                 rte_free(decompress_xforms[i]);
1425         }
1426
1427         return ret;
1428 }
1429
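/*
 * Repeat the stateless round trips with the data spread across
 * multi-segment (scatter-gather) mbufs. If the PMD does not advertise
 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT the case returns -ENOTSUP so the unit
 * test runner can treat it as unsupported rather than failed.
 */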
1430 static int
1431 test_compressdev_deflate_stateless_sgl(void)
1432 {
1433         struct comp_testsuite_params *ts_params = &testsuite_params;
1434         uint16_t i;
1435         const char *test_buffer;
1436         const struct rte_compressdev_capabilities *capab;
1437
1438         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1439         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1440
1441         if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1442                 return -ENOTSUP;
1443
1444         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1445                 test_buffer = compress_test_bufs[i];
1446                 /* Compress with compressdev, decompress with Zlib */
1447                 if (test_deflate_comp_decomp(&test_buffer, 1,
1448                                 &i,
1449                                 &ts_params->def_comp_xform,
1450                                 &ts_params->def_decomp_xform,
1451                                 1,
1452                                 RTE_COMP_OP_STATELESS,
1453                                 1,
1454                                 ZLIB_DECOMPRESS) < 0)
1455                         return TEST_FAILED;
1456
1457                 /* Compress with Zlib, decompress with compressdev */
1458                 if (test_deflate_comp_decomp(&test_buffer, 1,
1459                                 &i,
1460                                 &ts_params->def_comp_xform,
1461                                 &ts_params->def_decomp_xform,
1462                                 1,
1463                                 RTE_COMP_OP_STATELESS,
1464                                 1,
1465                                 ZLIB_COMPRESS) < 0)
1466                         return TEST_FAILED;
1467         }
1468
1469         return TEST_SUCCESS;
1470 }
1471
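/*
 * TEST_CASE_ST() runs the given setup/teardown around each case, so
 * generic_ut_setup()/generic_ut_teardown() bracket every deflate test
 * individually.
 */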
1472 static struct unit_test_suite compressdev_testsuite = {
1473         .suite_name = "compressdev unit test suite",
1474         .setup = testsuite_setup,
1475         .teardown = testsuite_teardown,
1476         .unit_test_cases = {
1477                 TEST_CASE_ST(NULL, NULL,
1478                         test_compressdev_invalid_configuration),
1479                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1480                         test_compressdev_deflate_stateless_fixed),
1481                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1482                         test_compressdev_deflate_stateless_dynamic),
1483                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1484                         test_compressdev_deflate_stateless_multi_op),
1485                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1486                         test_compressdev_deflate_stateless_multi_level),
1487                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1488                         test_compressdev_deflate_stateless_multi_xform),
1489                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1490                         test_compressdev_deflate_stateless_sgl),
1491                 TEST_CASES_END() /**< NULL terminate unit test array */
1492         }
1493 };
1494
1495 static int
1496 test_compressdev(void)
1497 {
1498         return unit_test_suite_runner(&compressdev_testsuite);
1499 }
1500
1501 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
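/*
 * REGISTER_TEST_COMMAND() exposes the suite as the "compressdev_autotest"
 * command; it can be run from the DPDK test application, e.g. by entering
 * "compressdev_autotest" at the RTE>> prompt, provided a compress PMD is
 * available.
 */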