1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 - 2019 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7 #include <stdlib.h>
8 #include <unistd.h>
9
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
13 #include <rte_mbuf.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
16
17 #include "test_compressdev_test_buffer.h"
18 #include "test.h"
19
20 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
21
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
26
27 /*
28  * Allow 30% extra space for the compressed data compared to the original
29  * data, in case the data size cannot be reduced and it actually grows
30  * due to the compress block headers.
31  */
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2
34 #define NUM_LARGE_MBUFS 16
35 #define SMALL_SEG_SIZE 256
36 #define MAX_SEGS 16
37 #define NUM_OPS 16
38 #define NUM_MAX_XFORMS 16
39 #define NUM_MAX_INFLIGHT_OPS 128
40 #define CACHE_SIZE 0
41
42 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
43 #define ZLIB_HEADER_SIZE 2
44 #define ZLIB_TRAILER_SIZE 4
45 #define GZIP_HEADER_SIZE 10
46 #define GZIP_TRAILER_SIZE 8
47
48 #define OUT_OF_SPACE_BUF 1
49
50 #define MAX_MBUF_SEGMENT_SIZE 65535
51 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
52 #define NUM_BIG_MBUFS 4
53 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
54
55 const char *
56 huffman_type_strings[] = {
57         [RTE_COMP_HUFFMAN_DEFAULT]      = "PMD default",
58         [RTE_COMP_HUFFMAN_FIXED]        = "Fixed",
59         [RTE_COMP_HUFFMAN_DYNAMIC]      = "Dynamic"
60 };
61
62 enum zlib_direction {
63         ZLIB_NONE,
64         ZLIB_COMPRESS,
65         ZLIB_DECOMPRESS,
66         ZLIB_ALL
67 };
68
69 enum varied_buff {
70         LB_BOTH = 0,    /* both input and output are linear */
71         SGL_BOTH,       /* both input and output are chained */
72         SGL_TO_LB,      /* input buffer is chained */
73         LB_TO_SGL       /* output buffer is chained */
74 };
75
76 enum overflow_test {
77         OVERFLOW_DISABLED,
78         OVERFLOW_ENABLED
79 };
80
81 struct priv_op_data {
82         uint16_t orig_idx;
83 };
84
85 struct comp_testsuite_params {
86         struct rte_mempool *large_mbuf_pool;
87         struct rte_mempool *small_mbuf_pool;
88         struct rte_mempool *big_mbuf_pool;
89         struct rte_mempool *op_pool;
90         struct rte_comp_xform *def_comp_xform;
91         struct rte_comp_xform *def_decomp_xform;
92 };
93
94 struct interim_data_params {
95         const char * const *test_bufs;
96         unsigned int num_bufs;
97         uint16_t *buf_idx;
98         struct rte_comp_xform **compress_xforms;
99         struct rte_comp_xform **decompress_xforms;
100         unsigned int num_xforms;
101 };
102
103 struct test_data_params {
104         enum rte_comp_op_type compress_state;
105         enum rte_comp_op_type decompress_state;
106         enum varied_buff buff_type;
107         enum zlib_direction zlib_dir;
108         unsigned int out_of_space;
109         unsigned int big_data;
110         /* stateful decompression specific parameters */
111         unsigned int decompress_output_block_size;
112         unsigned int decompress_steps_max;
113         /* external mbufs specific parameters */
114         unsigned int use_external_mbufs;
115         unsigned int inbuf_data_size;
116         const struct rte_memzone *inbuf_memzone;
117         const struct rte_memzone *compbuf_memzone;
118         const struct rte_memzone *uncompbuf_memzone;
119         /* overflow test activation */
120         enum overflow_test overflow;
121 };
122
123 static struct comp_testsuite_params testsuite_params = { 0 };
124
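/*
 * Release the mempools and default xforms created by testsuite_setup(),
 * warning if any mbufs or operations are still in use.
 */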
125 static void
126 testsuite_teardown(void)
127 {
128         struct comp_testsuite_params *ts_params = &testsuite_params;
129
130         if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
131                 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
132         if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
133                 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
134         if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
135                 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
136         if (rte_mempool_in_use_count(ts_params->op_pool))
137                 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
138
139         rte_mempool_free(ts_params->large_mbuf_pool);
140         rte_mempool_free(ts_params->small_mbuf_pool);
141         rte_mempool_free(ts_params->big_mbuf_pool);
142         rte_mempool_free(ts_params->op_pool);
143         rte_free(ts_params->def_comp_xform);
144         rte_free(ts_params->def_decomp_xform);
145 }
146
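/*
 * Create the mbuf and operation mempools and the default compress/decompress
 * xforms shared by all test cases. The suite is skipped if no compressdev
 * device is available.
 */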
147 static int
148 testsuite_setup(void)
149 {
150         struct comp_testsuite_params *ts_params = &testsuite_params;
151         uint32_t max_buf_size = 0;
152         unsigned int i;
153
154         if (rte_compressdev_count() == 0) {
155                 RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
156                 return TEST_SKIPPED;
157         }
158
159         RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
160                                 rte_compressdev_name_get(0));
161
162         for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
163                 max_buf_size = RTE_MAX(max_buf_size,
164                                 strlen(compress_test_bufs[i]) + 1);
165
166         /*
167          * Buffers to be used in compression and decompression.
168          * Since compressed data might be larger than the original data
169          * (due to the block headers), buffers should be big enough
170          * for both cases.
171          */
172         max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
173         ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
174                         NUM_LARGE_MBUFS,
175                         CACHE_SIZE, 0,
176                         max_buf_size + RTE_PKTMBUF_HEADROOM,
177                         rte_socket_id());
178         if (ts_params->large_mbuf_pool == NULL) {
179                 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
180                 return TEST_FAILED;
181         }
182
183         /* Create mempool with smaller buffers for SGL testing */
184         ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
185                         NUM_LARGE_MBUFS * MAX_SEGS,
186                         CACHE_SIZE, 0,
187                         SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
188                         rte_socket_id());
189         if (ts_params->small_mbuf_pool == NULL) {
190                 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
191                 goto exit;
192         }
193
194         /* Create mempool with big buffers for SGL testing */
195         ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
196                         NUM_BIG_MBUFS + 1,
197                         CACHE_SIZE, 0,
198                         MAX_MBUF_SEGMENT_SIZE,
199                         rte_socket_id());
200         if (ts_params->big_mbuf_pool == NULL) {
201                 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
202                 goto exit;
203         }
204
205         ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
206                                 0, sizeof(struct priv_op_data),
207                                 rte_socket_id());
208         if (ts_params->op_pool == NULL) {
209                 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
210                 goto exit;
211         }
212
213         ts_params->def_comp_xform =
214                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
215         if (ts_params->def_comp_xform == NULL) {
216                 RTE_LOG(ERR, USER1,
217                         "Default compress xform could not be created\n");
218                 goto exit;
219         }
220         ts_params->def_decomp_xform =
221                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
222         if (ts_params->def_decomp_xform == NULL) {
223                 RTE_LOG(ERR, USER1,
224                         "Default decompress xform could not be created\n");
225                 goto exit;
226         }
227
228         /* Initializes default values for compress/decompress xforms */
229         ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
230         ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
231         ts_params->def_comp_xform->compress.deflate.huffman =
232                                                 RTE_COMP_HUFFMAN_DEFAULT;
233         ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
234         ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
235         ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
236
237         ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
238         ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
239         ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
240         ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
241
242         return TEST_SUCCESS;
243
244 exit:
245         testsuite_teardown();
246
247         return TEST_FAILED;
248 }
249
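/*
 * Per-test setup: configure compressdev 0 with a single queue pair
 * and start the device.
 */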
250 static int
251 generic_ut_setup(void)
252 {
253         /* Configure compressdev (one device, one queue pair) */
254         struct rte_compressdev_config config = {
255                 .socket_id = rte_socket_id(),
256                 .nb_queue_pairs = 1,
257                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
258                 .max_nb_streams = 1
259         };
260
261         if (rte_compressdev_configure(0, &config) < 0) {
262                 RTE_LOG(ERR, USER1, "Device configuration failed\n");
263                 return -1;
264         }
265
266         if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
267                         rte_socket_id()) < 0) {
268                 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
269                 return -1;
270         }
271
272         if (rte_compressdev_start(0) < 0) {
273                 RTE_LOG(ERR, USER1, "Device could not be started\n");
274                 return -1;
275         }
276
277         return 0;
278 }
279
280 static void
281 generic_ut_teardown(void)
282 {
283         rte_compressdev_stop(0);
284         if (rte_compressdev_close(0) < 0)
285                 RTE_LOG(ERR, USER1, "Device could not be closed\n");
286 }
287
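/*
 * Negative test: device configuration and queue pair setup are expected
 * to fail when given invalid parameters (zero or too many queue pairs).
 */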
288 static int
289 test_compressdev_invalid_configuration(void)
290 {
291         struct rte_compressdev_config invalid_config;
292         struct rte_compressdev_config valid_config = {
293                 .socket_id = rte_socket_id(),
294                 .nb_queue_pairs = 1,
295                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
296                 .max_nb_streams = 1
297         };
298         struct rte_compressdev_info dev_info;
299
300         /* Invalid configuration with 0 queue pairs */
301         memcpy(&invalid_config, &valid_config,
302                         sizeof(struct rte_compressdev_config));
303         invalid_config.nb_queue_pairs = 0;
304
305         TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
306                         "Device configuration was successful "
307                         "with no queue pairs (invalid)\n");
308
309         /*
310          * Invalid configuration with too many queue pairs
311          * (if there is an actual maximum number of queue pairs)
312          */
313         rte_compressdev_info_get(0, &dev_info);
314         if (dev_info.max_nb_queue_pairs != 0) {
315                 memcpy(&invalid_config, &valid_config,
316                         sizeof(struct rte_compressdev_config));
317                 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
318
319                 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
320                                 "Device configuration was successful "
321                                 "with too many queue pairs (invalid)\n");
322         }
323
324         /* Invalid queue pair setup, with no queue pairs set */
325         TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
326                                 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
327                         "Queue pair setup was successful "
328                         "with no queue pairs set (invalid)\n");
329
330         return TEST_SUCCESS;
331 }
332
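/* Return 0 if both buffers have the same length and content, -1 otherwise. */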
333 static int
334 compare_buffers(const char *buffer1, uint32_t buffer1_len,
335                 const char *buffer2, uint32_t buffer2_len)
336 {
337         if (buffer1_len != buffer2_len) {
338                 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
339                 return -1;
340         }
341
342         if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
343                 RTE_LOG(ERR, USER1, "Buffers are different\n");
344                 return -1;
345         }
346
347         return 0;
348 }
349
350 /*
351  * Maps compressdev and Zlib flush flags
352  */
353 static int
354 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
355 {
356         switch (flag) {
357         case RTE_COMP_FLUSH_NONE:
358                 return Z_NO_FLUSH;
359         case RTE_COMP_FLUSH_SYNC:
360                 return Z_SYNC_FLUSH;
361         case RTE_COMP_FLUSH_FULL:
362                 return Z_FULL_FLUSH;
363         case RTE_COMP_FLUSH_FINAL:
364                 return Z_FINISH;
365         /*
366          * Only the values above are valid,
367          * so this should never happen
368          */
369         default:
370                 return -1;
371         }
372 }
373
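/*
 * Compress the operation's source mbuf directly with zlib's deflate API,
 * as a reference implementation to cross-check the compressdev PMD.
 * Chained (SGL) mbufs are flattened into temporary linear buffers.
 */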
374 static int
375 compress_zlib(struct rte_comp_op *op,
376                 const struct rte_comp_xform *xform, int mem_level)
377 {
378         z_stream stream;
379         int zlib_flush;
380         int strategy, window_bits, comp_level;
381         int ret = TEST_FAILED;
382         uint8_t *single_src_buf = NULL;
383         uint8_t *single_dst_buf = NULL;
384
385         /* initialize zlib stream */
386         stream.zalloc = Z_NULL;
387         stream.zfree = Z_NULL;
388         stream.opaque = Z_NULL;
389
390         if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
391                 strategy = Z_FIXED;
392         else
393                 strategy = Z_DEFAULT_STRATEGY;
394
395         /*
396          * Window bits is the base two logarithm of the window size (in bytes).
397          * When doing raw DEFLATE, this number will be negative.
398          */
399         window_bits = -(xform->compress.window_size);
400         if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
401                 window_bits *= -1;
402         else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
403                 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
404
405         comp_level = xform->compress.level;
406
407         if (comp_level != RTE_COMP_LEVEL_NONE)
408                 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
409                         window_bits, mem_level, strategy);
410         else
411                 ret = deflateInit(&stream, Z_NO_COMPRESSION);
412
413         if (ret != Z_OK) {
414                 printf("Zlib deflate could not be initialized\n");
415                 goto exit;
416         }
417
418         /* Assuming stateless operation */
419         /* SGL Input */
420         if (op->m_src->nb_segs > 1) {
421                 single_src_buf = rte_malloc(NULL,
422                                 rte_pktmbuf_pkt_len(op->m_src), 0);
423                 if (single_src_buf == NULL) {
424                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
425                         goto exit;
426                 }
427
428                 if (rte_pktmbuf_read(op->m_src, op->src.offset,
429                                         rte_pktmbuf_pkt_len(op->m_src) -
430                                         op->src.offset,
431                                         single_src_buf) == NULL) {
432                         RTE_LOG(ERR, USER1,
433                                 "Buffer could not be read entirely\n");
434                         goto exit;
435                 }
436
437                 stream.avail_in = op->src.length;
438                 stream.next_in = single_src_buf;
439
440         } else {
441                 stream.avail_in = op->src.length;
442                 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
443                                 op->src.offset);
444         }
445         /* SGL output */
446         if (op->m_dst->nb_segs > 1) {
447
448                 single_dst_buf = rte_malloc(NULL,
449                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
450                 if (single_dst_buf == NULL) {
451                         RTE_LOG(ERR, USER1,
452                                 "Buffer could not be allocated\n");
453                         goto exit;
454                 }
455
456                 stream.avail_out = op->m_dst->pkt_len;
457                 stream.next_out = single_dst_buf;
458
459         } else {/* linear output */
460                 stream.avail_out = op->m_dst->data_len;
461                 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
462                                 op->dst.offset);
463         }
464
465         /* Stateless operation, the whole buffer will be compressed in one go */
466         zlib_flush = map_zlib_flush_flag(op->flush_flag);
467         ret = deflate(&stream, zlib_flush);
468
469         if (stream.avail_in != 0) {
470                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
471                 goto exit;
472         }
473
474         if (ret != Z_STREAM_END)
475                 goto exit;
476
477         /* Copy data to destination SGL */
478         if (op->m_dst->nb_segs > 1) {
479                 uint32_t remaining_data = stream.total_out;
480                 uint8_t *src_data = single_dst_buf;
481                 struct rte_mbuf *dst_buf = op->m_dst;
482
483                 while (remaining_data > 0) {
484                         uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
485                                                 uint8_t *, op->dst.offset);
486                         /* Last segment */
487                         if (remaining_data < dst_buf->data_len) {
488                                 memcpy(dst_data, src_data, remaining_data);
489                                 remaining_data = 0;
490                         } else {
491                                 memcpy(dst_data, src_data, dst_buf->data_len);
492                                 remaining_data -= dst_buf->data_len;
493                                 src_data += dst_buf->data_len;
494                                 dst_buf = dst_buf->next;
495                         }
496                 }
497         }
498
499         op->consumed = stream.total_in;
500         if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
501                 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
502                 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
503                 op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
504                                 ZLIB_TRAILER_SIZE);
505         } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
506                 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
507                 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
508                 op->produced = stream.total_out - (GZIP_HEADER_SIZE +
509                                 GZIP_TRAILER_SIZE);
510         } else
511                 op->produced = stream.total_out;
512
513         op->status = RTE_COMP_OP_STATUS_SUCCESS;
514         op->output_chksum = stream.adler;
515
516         deflateReset(&stream);
517
518         ret = 0;
519 exit:
520         deflateEnd(&stream);
521         rte_free(single_src_buf);
522         rte_free(single_dst_buf);
523
524         return ret;
525 }
526
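/*
 * Decompress the operation's source mbuf directly with zlib's inflate API,
 * as a reference implementation to cross-check the compressdev PMD.
 * Chained (SGL) mbufs are flattened into temporary linear buffers.
 */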
527 static int
528 decompress_zlib(struct rte_comp_op *op,
529                 const struct rte_comp_xform *xform)
530 {
531         z_stream stream;
532         int window_bits;
533         int zlib_flush;
534         int ret = TEST_FAILED;
535         uint8_t *single_src_buf = NULL;
536         uint8_t *single_dst_buf = NULL;
537
538         /* initialize zlib stream */
539         stream.zalloc = Z_NULL;
540         stream.zfree = Z_NULL;
541         stream.opaque = Z_NULL;
542
543         /*
544          * Window bits is the base two logarithm of the window size (in bytes).
545          * When doing raw DEFLATE, this number will be negative.
546          */
547         window_bits = -(xform->decompress.window_size);
548         ret = inflateInit2(&stream, window_bits);
549
550         if (ret != Z_OK) {
551                 printf("Zlib deflate could not be initialized\n");
552                 goto exit;
553         }
554
555         /* Assuming stateless operation */
556         /* SGL */
557         if (op->m_src->nb_segs > 1) {
558                 single_src_buf = rte_malloc(NULL,
559                                 rte_pktmbuf_pkt_len(op->m_src), 0);
560                 if (single_src_buf == NULL) {
561                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
562                         goto exit;
563                 }
564                 single_dst_buf = rte_malloc(NULL,
565                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
566                 if (single_dst_buf == NULL) {
567                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
568                         goto exit;
569                 }
570                 if (rte_pktmbuf_read(op->m_src, 0,
571                                         rte_pktmbuf_pkt_len(op->m_src),
572                                         single_src_buf) == NULL) {
573                         RTE_LOG(ERR, USER1,
574                                 "Buffer could not be read entirely\n");
575                         goto exit;
576                 }
577
578                 stream.avail_in = op->src.length;
579                 stream.next_in = single_src_buf;
580                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
581                 stream.next_out = single_dst_buf;
582
583         } else {
584                 stream.avail_in = op->src.length;
585                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
586                 stream.avail_out = op->m_dst->data_len;
587                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
588         }
589
590         /* Stateless operation, the whole buffer will be decompressed in one go */
591         zlib_flush = map_zlib_flush_flag(op->flush_flag);
592         ret = inflate(&stream, zlib_flush);
593
594         if (stream.avail_in != 0) {
595                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
596                 goto exit;
597         }
598
599         if (ret != Z_STREAM_END)
600                 goto exit;
601
602         if (op->m_src->nb_segs > 1) {
603                 uint32_t remaining_data = stream.total_out;
604                 uint8_t *src_data = single_dst_buf;
605                 struct rte_mbuf *dst_buf = op->m_dst;
606
607                 while (remaining_data > 0) {
608                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
609                                         uint8_t *);
610                         /* Last segment */
611                         if (remaining_data < dst_buf->data_len) {
612                                 memcpy(dst_data, src_data, remaining_data);
613                                 remaining_data = 0;
614                         } else {
615                                 memcpy(dst_data, src_data, dst_buf->data_len);
616                                 remaining_data -= dst_buf->data_len;
617                                 src_data += dst_buf->data_len;
618                                 dst_buf = dst_buf->next;
619                         }
620                 }
621         }
622
623         op->consumed = stream.total_in;
624         op->produced = stream.total_out;
625         op->status = RTE_COMP_OP_STATUS_SUCCESS;
626
627         inflateReset(&stream);
628
629         ret = 0;
630 exit:
631         inflateEnd(&stream);
632
633         return ret;
634 }
635
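/*
 * Build a chained (SGL) mbuf holding total_data_size bytes on top of head_buf,
 * appending at most seg_size bytes per segment and copying test_buf into the
 * chain when it is not NULL. Returns 0 on success, -1 on failure.
 */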
636 static int
637 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
638                 uint32_t total_data_size,
639                 struct rte_mempool *small_mbuf_pool,
640                 struct rte_mempool *large_mbuf_pool,
641                 uint8_t limit_segs_in_sgl,
642                 uint16_t seg_size)
643 {
644         uint32_t remaining_data = total_data_size;
645         uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
646         struct rte_mempool *pool;
647         struct rte_mbuf *next_seg;
648         uint32_t data_size;
649         char *buf_ptr;
650         const char *data_ptr = test_buf;
651         uint16_t i;
652         int ret;
653
654         if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
655                 num_remaining_segs = limit_segs_in_sgl - 1;
656
657         /*
658          * Allocate data in the first segment (header) and
659          * copy data if test buffer is provided
660          */
661         if (remaining_data < seg_size)
662                 data_size = remaining_data;
663         else
664                 data_size = seg_size;
665         buf_ptr = rte_pktmbuf_append(head_buf, data_size);
666         if (buf_ptr == NULL) {
667                 RTE_LOG(ERR, USER1,
668                         "Not enough space in the 1st buffer\n");
669                 return -1;
670         }
671
672         if (data_ptr != NULL) {
673                 /* Copy characters without NULL terminator */
674                 strncpy(buf_ptr, data_ptr, data_size);
675                 data_ptr += data_size;
676         }
677         remaining_data -= data_size;
678         num_remaining_segs--;
679
680         /*
681          * Allocate the rest of the segments,
682          * copy the rest of the data and chain the segments.
683          */
684         for (i = 0; i < num_remaining_segs; i++) {
685
686                 if (i == (num_remaining_segs - 1)) {
687                         /* last segment */
688                         if (remaining_data > seg_size)
689                                 pool = large_mbuf_pool;
690                         else
691                                 pool = small_mbuf_pool;
692                         data_size = remaining_data;
693                 } else {
694                         data_size = seg_size;
695                         pool = small_mbuf_pool;
696                 }
697
698                 next_seg = rte_pktmbuf_alloc(pool);
699                 if (next_seg == NULL) {
700                         RTE_LOG(ERR, USER1,
701                                 "New segment could not be allocated "
702                                 "from the mempool\n");
703                         return -1;
704                 }
705                 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
706                 if (buf_ptr == NULL) {
707                         RTE_LOG(ERR, USER1,
708                                 "Not enough space in the buffer\n");
709                         rte_pktmbuf_free(next_seg);
710                         return -1;
711                 }
712                 if (data_ptr != NULL) {
713                         /* Copy characters without NULL terminator */
714                         strncpy(buf_ptr, data_ptr, data_size);
715                         data_ptr += data_size;
716                 }
717                 remaining_data -= data_size;
718
719                 ret = rte_pktmbuf_chain(head_buf, next_seg);
720                 if (ret != 0) {
721                         rte_pktmbuf_free(next_seg);
722                         RTE_LOG(ERR, USER1,
723                                 "Segment could not chained\n");
724                         return -1;
725                 }
726         }
727
728         return 0;
729 }
730
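/* No-op free callback for mbufs attached to external memzone buffers. */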
731 static void
732 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
733 {
734 }
735
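/*
 * Enqueue all operations on queue pair 0 and poll until every one of them
 * has been dequeued, retrying up to MAX_DEQD_RETRIES times with a delay.
 */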
736 static int
737 test_run_enqueue_dequeue(struct rte_comp_op **ops, unsigned int num_bufs,
738                   struct rte_comp_op **ops_processed)
739 {
740         uint16_t num_enqd, num_deqd, num_total_deqd;
741         unsigned int deqd_retries = 0;
742
743         /* Enqueue and dequeue all operations */
744         num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
745         if (num_enqd < num_bufs) {
746                 RTE_LOG(ERR, USER1,
747                         "Some operations could not be enqueued\n");
748                 return -1;
749         }
750
751         num_total_deqd = 0;
752         do {
753                 /*
754                  * If retrying a dequeue call, wait for 10 ms to give
755                  * the driver enough time to process the operations
756                  */
757                 if (deqd_retries != 0) {
758                         /*
759                          * Avoid infinite loop if not all the
760                          * operations get out of the device
761                          */
762                         if (deqd_retries == MAX_DEQD_RETRIES) {
763                                 RTE_LOG(ERR, USER1,
764                                         "Not all operations could be dequeued\n");
765                                 return -1;
766                         }
767                         usleep(DEQUEUE_WAIT_TIME);
768                 }
769                 num_deqd = rte_compressdev_dequeue_burst(0, 0,
770                                 &ops_processed[num_total_deqd], num_bufs);
771                 num_total_deqd += num_deqd;
772                 deqd_retries++;
773
774         } while (num_total_deqd < num_enqd);
775
776         return 0;
777 }
778
779 /*
780  * Compresses and decompresses buffer with compressdev API and Zlib API
781  */
782 static int
783 test_deflate_comp_decomp(const struct interim_data_params *int_data,
784                 const struct test_data_params *test_data)
785 {
786         struct comp_testsuite_params *ts_params = &testsuite_params;
787         const char * const *test_bufs = int_data->test_bufs;
788         unsigned int num_bufs = int_data->num_bufs;
789         uint16_t *buf_idx = int_data->buf_idx;
790         struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
791         struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
792         unsigned int num_xforms = int_data->num_xforms;
793         enum rte_comp_op_type compress_state = test_data->compress_state;
794         enum rte_comp_op_type decompress_state = test_data->decompress_state;
795         unsigned int buff_type = test_data->buff_type;
796         unsigned int out_of_space = test_data->out_of_space;
797         unsigned int big_data = test_data->big_data;
798         enum zlib_direction zlib_dir = test_data->zlib_dir;
799         enum overflow_test overflow_tst = test_data->overflow;
800         int ret_status = TEST_FAILED;
801         struct rte_mbuf_ext_shared_info inbuf_info;
802         struct rte_mbuf_ext_shared_info compbuf_info;
803         struct rte_mbuf_ext_shared_info decompbuf_info;
804         int ret;
805         struct rte_mbuf *uncomp_bufs[num_bufs];
806         struct rte_mbuf *comp_bufs[num_bufs];
807         struct rte_comp_op *ops[num_bufs];
808         struct rte_comp_op *ops_processed[num_bufs];
809         void *priv_xforms[num_bufs];
810         uint16_t num_enqd, num_deqd, num_total_deqd;
811         uint16_t num_priv_xforms = 0;
812         unsigned int deqd_retries = 0;
813         struct priv_op_data *priv_data;
814         char *buf_ptr;
815         unsigned int i;
816         struct rte_mempool *buf_pool;
817         uint32_t data_size;
818         /* Compressing with CompressDev */
819         unsigned int oos_zlib_decompress =
820                         (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
821         /* Decompressing with CompressDev */
822         unsigned int oos_zlib_compress =
823                         (zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
824         const struct rte_compressdev_capabilities *capa =
825                 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
826         char *contig_buf = NULL;
827         uint64_t compress_checksum[num_bufs];
828         uint32_t compressed_data_size[num_bufs];
829         void *stream = NULL;
830         char *all_decomp_data = NULL;
831         unsigned int decomp_produced_data_size = 0;
832         unsigned int step = 0;
833
834         TEST_ASSERT(decompress_state == RTE_COMP_OP_STATELESS || num_bufs == 1,
835                     "Number of stateful operations in a step should be 1");
836
837         if (capa == NULL) {
838                 RTE_LOG(ERR, USER1,
839                         "Compress device does not support DEFLATE\n");
840                 return -ENOTSUP;
841         }
842
843         /* Initialize all arrays to NULL */
844         memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
845         memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
846         memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
847         memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
848         memset(priv_xforms, 0, sizeof(void *) * num_bufs);
849         memset(compressed_data_size, 0, sizeof(uint32_t) * num_bufs);
850
851         if (decompress_state == RTE_COMP_OP_STATEFUL) {
852                 data_size = strlen(test_bufs[0]) + 1;
853                 all_decomp_data = rte_malloc(NULL, data_size,
854                                              RTE_CACHE_LINE_SIZE);
855         }
856
857         if (big_data)
858                 buf_pool = ts_params->big_mbuf_pool;
859         else if (buff_type == SGL_BOTH)
860                 buf_pool = ts_params->small_mbuf_pool;
861         else
862                 buf_pool = ts_params->large_mbuf_pool;
863
864         /* Prepare the source mbufs with the data */
865         ret = rte_pktmbuf_alloc_bulk(buf_pool,
866                                 uncomp_bufs, num_bufs);
867         if (ret < 0) {
868                 RTE_LOG(ERR, USER1,
869                         "Source mbufs could not be allocated "
870                         "from the mempool\n");
871                 goto exit;
872         }
873
874         if (test_data->use_external_mbufs) {
875                 inbuf_info.free_cb = extbuf_free_callback;
876                 inbuf_info.fcb_opaque = NULL;
877                 rte_mbuf_ext_refcnt_set(&inbuf_info, 1);
878                 for (i = 0; i < num_bufs; i++) {
879                         rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
880                                         test_data->inbuf_memzone->addr,
881                                         test_data->inbuf_memzone->iova,
882                                         test_data->inbuf_data_size,
883                                         &inbuf_info);
884                         rte_pktmbuf_append(uncomp_bufs[i],
885                                         test_data->inbuf_data_size);
886                 }
887         } else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
888                 for (i = 0; i < num_bufs; i++) {
889                         data_size = strlen(test_bufs[i]) + 1;
890                         if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
891                             data_size,
892                             big_data ? buf_pool : ts_params->small_mbuf_pool,
893                             big_data ? buf_pool : ts_params->large_mbuf_pool,
894                             big_data ? 0 : MAX_SEGS,
895                             big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
896                                 goto exit;
897                 }
898         } else {
899                 for (i = 0; i < num_bufs; i++) {
900                         data_size = strlen(test_bufs[i]) + 1;
901                         buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
902                         if (buf_ptr == NULL) {
903                                 RTE_LOG(ERR, USER1,
904                                         "Append extra bytes to the source mbuf failed\n");
905                                 goto exit;
906                         }
907                         strlcpy(buf_ptr, test_bufs[i], data_size);
908                 }
909         }
910
911         /* Prepare the destination mbufs */
912         ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
913         if (ret < 0) {
914                 RTE_LOG(ERR, USER1,
915                         "Destination mbufs could not be allocated "
916                         "from the mempool\n");
917                 goto exit;
918         }
919
920         if (test_data->use_external_mbufs) {
921                 compbuf_info.free_cb = extbuf_free_callback;
922                 compbuf_info.fcb_opaque = NULL;
923                 rte_mbuf_ext_refcnt_set(&compbuf_info, 1);
924                 for (i = 0; i < num_bufs; i++) {
925                         rte_pktmbuf_attach_extbuf(comp_bufs[i],
926                                         test_data->compbuf_memzone->addr,
927                                         test_data->compbuf_memzone->iova,
928                                         test_data->compbuf_memzone->len,
929                                         &compbuf_info);
930                         rte_pktmbuf_append(comp_bufs[i],
931                                         test_data->compbuf_memzone->len);
932                 }
933         } else if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
934                 for (i = 0; i < num_bufs; i++) {
935                         if (out_of_space == 1 && oos_zlib_decompress)
936                                 data_size = OUT_OF_SPACE_BUF;
937                         else
938                                 data_size = strlen(test_bufs[i]) *
939                                         COMPRESS_BUF_SIZE_RATIO;
940
941                         if (prepare_sgl_bufs(NULL, comp_bufs[i],
942                               data_size,
943                               big_data ? buf_pool : ts_params->small_mbuf_pool,
944                               big_data ? buf_pool : ts_params->large_mbuf_pool,
945                               big_data ? 0 : MAX_SEGS,
946                               big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
947                                         < 0)
948                                 goto exit;
949                 }
950
951         } else {
952                 for (i = 0; i < num_bufs; i++) {
953                         if (out_of_space == 1 && oos_zlib_decompress)
954                                 data_size = OUT_OF_SPACE_BUF;
955                         else {
956                                 float ratio =
957                                 ((test_data->zlib_dir == ZLIB_DECOMPRESS ||
958                                    test_data->zlib_dir == ZLIB_NONE) &&
959                                   overflow_tst == OVERFLOW_ENABLED) ?
960                                          COMPRESS_BUF_SIZE_RATIO_OVERFLOW :
961                                          COMPRESS_BUF_SIZE_RATIO;
962
963                                 data_size = strlen(test_bufs[i]) * ratio;
964                         }
965                         buf_ptr = rte_pktmbuf_append(comp_bufs[i], data_size);
966                         if (buf_ptr == NULL) {
967                                 RTE_LOG(ERR, USER1,
968                                         "Append extra bytes to the destination mbuf failed\n");
969                                 goto exit;
970                         }
971                 }
972         }
973
974         /* Build the compression operations */
975         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
976         if (ret < 0) {
977                 RTE_LOG(ERR, USER1,
978                         "Compress operations could not be allocated "
979                         "from the mempool\n");
980                 goto exit;
981         }
982
983
984         for (i = 0; i < num_bufs; i++) {
985                 ops[i]->m_src = uncomp_bufs[i];
986                 ops[i]->m_dst = comp_bufs[i];
987                 ops[i]->src.offset = 0;
988                 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
989                 ops[i]->dst.offset = 0;
990                 if (compress_state == RTE_COMP_OP_STATELESS)
991                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
992                 else {
993                         RTE_LOG(ERR, USER1,
994                                 "Stateful operations are not supported "
995                                 "in these tests yet\n");
996                         goto exit;
997                 }
998                 ops[i]->input_chksum = 0;
999                 /*
1000                  * Store the original operation index in the private data,
1001                  * since ordering does not have to be maintained
1002                  * when dequeueing from compressdev, so that the results
1003                  * can be compared at the end of the test.
1004                  */
1005                 priv_data = (struct priv_op_data *) (ops[i] + 1);
1006                 priv_data->orig_idx = i;
1007         }
1008
1009         /* Compress data (either with Zlib API or compressdev API) */
1010         if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
1011                 for (i = 0; i < num_bufs; i++) {
1012                         const struct rte_comp_xform *compress_xform =
1013                                 compress_xforms[i % num_xforms];
1014                         ret = compress_zlib(ops[i], compress_xform,
1015                                         DEFAULT_MEM_LEVEL);
1016                         if (ret < 0)
1017                                 goto exit;
1018
1019                         ops_processed[i] = ops[i];
1020                 }
1021         } else {
1022                 /* Create compress private xform data */
1023                 for (i = 0; i < num_xforms; i++) {
1024                         ret = rte_compressdev_private_xform_create(0,
1025                                 (const struct rte_comp_xform *)compress_xforms[i],
1026                                 &priv_xforms[i]);
1027                         if (ret < 0) {
1028                                 RTE_LOG(ERR, USER1,
1029                                         "Compression private xform "
1030                                         "could not be created\n");
1031                                 goto exit;
1032                         }
1033                         num_priv_xforms++;
1034                 }
1035
1036                 if (capa->comp_feature_flags &
1037                                 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1038                         /* Attach shareable private xform data to ops */
1039                         for (i = 0; i < num_bufs; i++)
1040                                 ops[i]->private_xform = priv_xforms[i % num_xforms];
1041                 } else {
1042                         /* Create rest of the private xforms for the other ops */
1043                         for (i = num_xforms; i < num_bufs; i++) {
1044                                 ret = rte_compressdev_private_xform_create(0,
1045                                         compress_xforms[i % num_xforms],
1046                                         &priv_xforms[i]);
1047                                 if (ret < 0) {
1048                                         RTE_LOG(ERR, USER1,
1049                                                 "Compression private xform "
1050                                                 "could not be created\n");
1051                                         goto exit;
1052                                 }
1053                                 num_priv_xforms++;
1054                         }
1055
1056                         /* Attach non shareable private xform data to ops */
1057                         for (i = 0; i < num_bufs; i++)
1058                                 ops[i]->private_xform = priv_xforms[i];
1059                 }
1060
1061 recovery_lb:
1062                 ret = test_run_enqueue_dequeue(ops, num_bufs, ops_processed);
1063                 if (ret < 0) {
1064                         RTE_LOG(ERR, USER1,
1065                                 "Enqueue/dequeue operation failed\n");
1066                         goto exit;
1067                 }
1068
1069                 for (i = 0; i < num_bufs; i++) {
1070                         compressed_data_size[i] += ops_processed[i]->produced;
1071
1072                         if (ops_processed[i]->status ==
1073                                 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
1074
1075                                 ops[i]->status =
1076                                         RTE_COMP_OP_STATUS_NOT_PROCESSED;
1077                                 ops[i]->src.offset +=
1078                                         ops_processed[i]->consumed;
1079                                 ops[i]->src.length -=
1080                                         ops_processed[i]->consumed;
1081                                 ops[i]->dst.offset +=
1082                                         ops_processed[i]->produced;
1083
1084                                 buf_ptr = rte_pktmbuf_append(
1085                                         ops[i]->m_dst,
1086                                         ops_processed[i]->produced);
1087
1088                                 if (buf_ptr == NULL) {
1089                                         RTE_LOG(ERR, USER1,
1090                                                 "Data recovery: append extra bytes to the current mbuf failed\n");
1091                                         goto exit;
1092                                 }
1093                                 goto recovery_lb;
1094                         }
1095                 }
1096                 deqd_retries = 0;
1097
1098                 /* Free compress private xforms */
1099                 for (i = 0; i < num_priv_xforms; i++) {
1100                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
1101                         priv_xforms[i] = NULL;
1102                 }
1103                 num_priv_xforms = 0;
1104         }
1105
1106         for (i = 0; i < num_bufs; i++) {
1107                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1108                 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1109                 const struct rte_comp_compress_xform *compress_xform =
1110                                 &compress_xforms[xform_idx]->compress;
1111                 enum rte_comp_huffman huffman_type =
1112                         compress_xform->deflate.huffman;
1113                 char engine[] = "zlib (directly, not PMD)";
1114                 if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
1115                         strlcpy(engine, "PMD", sizeof(engine));
1116
1117                 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
1118                         " %u bytes (level = %d, huffman = %s)\n",
1119                         buf_idx[priv_data->orig_idx], engine,
1120                         ops_processed[i]->consumed, ops_processed[i]->produced,
1121                         compress_xform->level,
1122                         huffman_type_strings[huffman_type]);
1123                 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
1124                         ops_processed[i]->consumed == 0 ? 0 :
1125                         (float)ops_processed[i]->produced /
1126                         ops_processed[i]->consumed * 100);
1127                 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
1128                         compress_checksum[i] = ops_processed[i]->output_chksum;
1129                 ops[i] = NULL;
1130         }
1131
1132         /*
1133          * Check operation status and free source mbufs (destination mbuf and
1134          * compress operation information is needed for the decompression stage)
1135          */
1136         for (i = 0; i < num_bufs; i++) {
1137                 if (out_of_space && oos_zlib_decompress) {
1138                         if (ops_processed[i]->status !=
1139                                 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1140                                 ret_status = TEST_FAILED;
1141                                 RTE_LOG(ERR, USER1,
1142                                         "Operation without expected out of "
1143                                         "space status error\n");
1144                                 goto exit;
1145                         } else
1146                                 continue;
1147                 }
1148
1149                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1150                         if (overflow_tst == OVERFLOW_ENABLED) {
1151                                 if (ops_processed[i]->status ==
1152                                 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1153                                         ret_status = 1;
1154                                         RTE_LOG(INFO, USER1,
1155                                         "Out-of-space-recoverable functionality"
1156                                         " is not supported on this device\n");
1157                                         goto exit;
1158                                 }
1159                         }
1160                         RTE_LOG(ERR, USER1,
1161                                 "Some operations were not successful\n");
1162                         goto exit;
1163                 }
1164                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1165                 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1166                 uncomp_bufs[priv_data->orig_idx] = NULL;
1167         }
1168
1169         if (out_of_space && oos_zlib_decompress) {
1170                 ret_status = TEST_SUCCESS;
1171                 goto exit;
1172         }
1173
1174         /* Allocate buffers for decompressed data */
1175         ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1176         if (ret < 0) {
1177                 RTE_LOG(ERR, USER1,
1178                         "Destination mbufs could not be allocated "
1179                         "from the mempool\n");
1180                 goto exit;
1181         }
1182
1183         if (test_data->use_external_mbufs) {
1184                 decompbuf_info.free_cb = extbuf_free_callback;
1185                 decompbuf_info.fcb_opaque = NULL;
1186                 rte_mbuf_ext_refcnt_set(&decompbuf_info, 1);
1187                 for (i = 0; i < num_bufs; i++) {
1188                         rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
1189                                         test_data->uncompbuf_memzone->addr,
1190                                         test_data->uncompbuf_memzone->iova,
1191                                         test_data->uncompbuf_memzone->len,
1192                                         &decompbuf_info);
1193                         rte_pktmbuf_append(uncomp_bufs[i],
1194                                         test_data->uncompbuf_memzone->len);
1195                 }
1196         } else if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1197                 for (i = 0; i < num_bufs; i++) {
1198                         priv_data = (struct priv_op_data *)
1199                                         (ops_processed[i] + 1);
1200                         if (out_of_space == 1 && oos_zlib_compress)
1201                                 data_size = OUT_OF_SPACE_BUF;
1202                         else if (test_data->decompress_output_block_size != 0)
1203                                 data_size =
1204                                         test_data->decompress_output_block_size;
1205                         else
1206                                 data_size =
1207                                 strlen(test_bufs[priv_data->orig_idx]) + 1;
1208
1209                         if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1210                                data_size,
1211                                big_data ? buf_pool : ts_params->small_mbuf_pool,
1212                                big_data ? buf_pool : ts_params->large_mbuf_pool,
1213                                big_data ? 0 : MAX_SEGS,
1214                                big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1215                                         < 0)
1216                                 goto exit;
1217                 }
1218
1219         } else {
1220                 for (i = 0; i < num_bufs; i++) {
1221                         priv_data = (struct priv_op_data *)
1222                                         (ops_processed[i] + 1);
1223                         if (out_of_space == 1 && oos_zlib_compress)
1224                                 data_size = OUT_OF_SPACE_BUF;
1225                         else if (test_data->decompress_output_block_size != 0)
1226                                 data_size =
1227                                         test_data->decompress_output_block_size;
1228                         else
1229                                 data_size =
1230                                 strlen(test_bufs[priv_data->orig_idx]) + 1;
1231
1232                         buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
1233                         if (buf_ptr == NULL) {
1234                                 RTE_LOG(ERR, USER1,
1235                                         "Append extra bytes to the decompressed mbuf failed\n");
1236                                 goto exit;
1237                         }
1238                 }
1239         }
1240
1241         /* Build the decompression operations */
1242         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1243         if (ret < 0) {
1244                 RTE_LOG(ERR, USER1,
1245                         "Decompress operations could not be allocated "
1246                         "from the mempool\n");
1247                 goto exit;
1248         }
1249
1250         /* Source buffer is the compressed data from the previous operations */
1251         for (i = 0; i < num_bufs; i++) {
1252                 ops[i]->m_src = ops_processed[i]->m_dst;
1253                 ops[i]->m_dst = uncomp_bufs[i];
1254                 ops[i]->src.offset = 0;
1255                 /*
1256                  * Set the length of the compressed data to the
1257                  * number of bytes that were produced in the previous stage
1258                  */
1259                 if (compressed_data_size[i])
1260                         ops[i]->src.length = compressed_data_size[i];
1261                 else
1262                         ops[i]->src.length = ops_processed[i]->produced;
1263
1264                 ops[i]->dst.offset = 0;
1265                 if (decompress_state == RTE_COMP_OP_STATELESS) {
1266                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1267                         ops[i]->op_type = RTE_COMP_OP_STATELESS;
1268                 } else if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_NONE) {
1269                         ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
1270                         ops[i]->op_type = RTE_COMP_OP_STATEFUL;
1271                 } else {
1272                         RTE_LOG(ERR, USER1,
1273                                 "Stateful operations are not supported "
1274                                 "in these tests yet\n");
1275                         goto exit;
1276                 }
1277                 ops[i]->input_chksum = 0;
1278                 /*
1279                  * Copy private data from previous operations,
1280                  * to keep the pointer to the original buffer
1281                  */
1282                 memcpy(ops[i] + 1, ops_processed[i] + 1,
1283                                 sizeof(struct priv_op_data));
1284         }
1285
1286         /*
1287          * Free the previous compress operations,
1288          * as they are not needed anymore
1289          */
1290         rte_comp_op_bulk_free(ops_processed, num_bufs);
1291
1292         /* Decompress data (either with Zlib API or compressdev API) */
1293         if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1294                 for (i = 0; i < num_bufs; i++) {
1295                         priv_data = (struct priv_op_data *)(ops[i] + 1);
1296                         uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1297                         const struct rte_comp_xform *decompress_xform =
1298                                 decompress_xforms[xform_idx];
1299
1300                         ret = decompress_zlib(ops[i], decompress_xform);
1301                         if (ret < 0)
1302                                 goto exit;
1303
1304                         ops_processed[i] = ops[i];
1305                 }
1306         } else {
1307                 if (decompress_state == RTE_COMP_OP_STATELESS) {
1308                         /* Create decompress private xform data */
1309                         for (i = 0; i < num_xforms; i++) {
1310                                 ret = rte_compressdev_private_xform_create(0,
1311                                         (const struct rte_comp_xform *)
1312                                         decompress_xforms[i],
1313                                         &priv_xforms[i]);
1314                                 if (ret < 0) {
1315                                         RTE_LOG(ERR, USER1,
1316                                                 "Decompression private xform "
1317                                                 "could not be created\n");
1318                                         goto exit;
1319                                 }
1320                                 num_priv_xforms++;
1321                         }
1322
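                             /*
                              * A shareable private xform may be attached to
                              * many ops at once; without that capability each
                              * op below gets its own private xform instance.
                              */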
1323                         if (capa->comp_feature_flags &
1324                                         RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1325                                 /* Attach shareable private xform data to ops */
1326                                 for (i = 0; i < num_bufs; i++) {
1327                                         priv_data = (struct priv_op_data *)
1328                                                         (ops[i] + 1);
1329                                         uint16_t xform_idx =
1330                                                priv_data->orig_idx % num_xforms;
1331                                         ops[i]->private_xform =
1332                                                         priv_xforms[xform_idx];
1333                                 }
1334                         } else {
1335                                 /* Create rest of the private xforms
1336                                  * for the other ops */
1337                                 for (i = num_xforms; i < num_bufs; i++) {
1338                                         ret =
1339                                          rte_compressdev_private_xform_create(0,
1340                                               decompress_xforms[i % num_xforms],
1341                                               &priv_xforms[i]);
1342                                         if (ret < 0) {
1343                                                 RTE_LOG(ERR, USER1,
1344                                                         "Decompression private xform could not be created\n");
1345                                                 goto exit;
1346                                         }
1347                                         num_priv_xforms++;
1348                                 }
1349
1350                                 /* Attach non-shareable private xform data
1351                                  * to ops */
1352                                 for (i = 0; i < num_bufs; i++) {
1353                                         priv_data = (struct priv_op_data *)
1354                                                         (ops[i] + 1);
1355                                         uint16_t xform_idx =
1356                                                         priv_data->orig_idx;
1357                                         ops[i]->private_xform =
1358                                                         priv_xforms[xform_idx];
1359                                 }
1360                         }
1361                 } else {
1362                         /* Create a stream object for stateful decompression */
1363                         ret = rte_compressdev_stream_create(0,
1364                                         decompress_xforms[0], &stream);
1365                         if (ret < 0) {
1366                                 RTE_LOG(ERR, USER1,
1367                                         "Decompression stream could not be created, error %d\n",
1368                                         ret);
1369                                 goto exit;
1370                         }
1371                         /* Attach stream to ops */
1372                         for (i = 0; i < num_bufs; i++)
1373                                 ops[i]->stream = stream;
1374                 }
1375
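                     /*
                      * Stateful decompression re-enters this point (via the
                      * "goto next_step" further down) whenever an op filled
                      * its output block but left part of its input unconsumed.
                      */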
1376 next_step:
1377                 /* Enqueue and dequeue all operations */
1378                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1379                 if (num_enqd < num_bufs) {
1380                         RTE_LOG(ERR, USER1,
1381                                 "The operations could not be enqueued\n");
1382                         goto exit;
1383                 }
1384
1385                 num_total_deqd = 0;
1386                 do {
1387                         /*
1388                          * If retrying a dequeue call, wait for 10 ms to allow
1389                          * enough time for the driver to process the operations
1390                          */
1391                         if (deqd_retries != 0) {
1392                                 /*
1393                                  * Avoid infinite loop if not all the
1394                                  * operations get out of the device
1395                                  */
1396                                 if (deqd_retries == MAX_DEQD_RETRIES) {
1397                                         RTE_LOG(ERR, USER1,
1398                                                 "Not all operations could be "
1399                                                 "dequeued\n");
1400                                         goto exit;
1401                                 }
1402                                 usleep(DEQUEUE_WAIT_TIME);
1403                         }
1404                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
1405                                         &ops_processed[num_total_deqd], num_bufs);
1406                         num_total_deqd += num_deqd;
1407                         deqd_retries++;
1408                 } while (num_total_deqd < num_enqd);
1409
1410                 deqd_retries = 0;
1411         }
1412
1413         for (i = 0; i < num_bufs; i++) {
1414                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1415                 char engine[] = "zlib (directly, no PMD)";
1416                 if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1417                         strlcpy(engine, "pmd", sizeof(engine));
1418                 RTE_LOG(DEBUG, USER1,
1419                         "Buffer %u decompressed by %s from %u to %u bytes\n",
1420                         buf_idx[priv_data->orig_idx], engine,
1421                         ops_processed[i]->consumed, ops_processed[i]->produced);
1422                 ops[i] = NULL;
1423         }
1424
1425         /*
1426          * Check operation status and free source mbuf (destination mbuf and
1427          * compress operation information is still needed)
1428          */
1429         for (i = 0; i < num_bufs; i++) {
1430                 if (out_of_space && oos_zlib_compress) {
1431                         if (ops_processed[i]->status !=
1432                                         RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1433                                 ret_status = TEST_FAILED;
1434                                 RTE_LOG(ERR, USER1,
1435                                         "Operation without expected out of "
1436                                         "space status error\n");
1437                                 goto exit;
1438                         } else
1439                                 continue;
1440                 }
1441
1442                 if (decompress_state == RTE_COMP_OP_STATEFUL
1443                         && (ops_processed[i]->status ==
1444                                 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
1445                             || ops_processed[i]->status ==
1446                                 RTE_COMP_OP_STATUS_SUCCESS)) {
1447                         /* collect the output into all_decomp_data */
1448                         const void *ptr = rte_pktmbuf_read(
1449                                         ops_processed[i]->m_dst,
1450                                         ops_processed[i]->dst.offset,
1451                                         ops_processed[i]->produced,
1452                                         all_decomp_data +
1453                                                 decomp_produced_data_size);
1454                         if (ptr != all_decomp_data + decomp_produced_data_size)
1455                                 rte_memcpy(all_decomp_data +
1456                                            decomp_produced_data_size,
1457                                            ptr, ops_processed[i]->produced);
1458                         decomp_produced_data_size += ops_processed[i]->produced;
1459                         if (ops_processed[i]->src.length >
1460                                         ops_processed[i]->consumed) {
1461                                 if (ops_processed[i]->status ==
1462                                                 RTE_COMP_OP_STATUS_SUCCESS) {
1463                                         ret_status = -1;
1464                                         RTE_LOG(ERR, USER1,
1465                                               "Operation finished too early\n");
1466                                         goto exit;
1467                                 }
1468                                 step++;
1469                                 if (step >= test_data->decompress_steps_max) {
1470                                         ret_status = -1;
1471                                         RTE_LOG(ERR, USER1,
1472                                           "Operation exceeded maximum steps\n");
1473                                         goto exit;
1474                                 }
1475                                 ops[i] = ops_processed[i];
1476                                 ops[i]->status =
1477                                                RTE_COMP_OP_STATUS_NOT_PROCESSED;
1478                                 ops[i]->src.offset +=
1479                                                 ops_processed[i]->consumed;
1480                                 ops[i]->src.length -=
1481                                                 ops_processed[i]->consumed;
1482                                 goto next_step;
1483                         } else {
1484                                 /* Compare the original stream with the
1485                                  * decompressed stream (in size and the data) */
1486                                 priv_data = (struct priv_op_data *)
1487                                                 (ops_processed[i] + 1);
1488                                 const char *buf1 =
1489                                                 test_bufs[priv_data->orig_idx];
1490                                 const char *buf2 = all_decomp_data;
1491
1492                                 if (compare_buffers(buf1, strlen(buf1) + 1,
1493                                           buf2, decomp_produced_data_size) < 0)
1494                                         goto exit;
1495                                 /* Test checksums */
1496                                 if (compress_xforms[0]->compress.chksum
1497                                                 != RTE_COMP_CHECKSUM_NONE) {
1498                                         if (ops_processed[i]->output_chksum
1499                                                       != compress_checksum[i]) {
1500                                                 RTE_LOG(ERR, USER1,
1501                                                         "The checksums differ\n"
1502                              "Compression Checksum: %" PRIu64 "\tDecompression "
1503                                 "Checksum: %" PRIu64 "\n", compress_checksum[i],
1504                                                ops_processed[i]->output_chksum);
1505                                                 goto exit;
1506                                         }
1507                                 }
1508                         }
1509                 } else if (ops_processed[i]->status !=
1510                            RTE_COMP_OP_STATUS_SUCCESS) {
1511                         RTE_LOG(ERR, USER1,
1512                                 "Some operations were not successful\n");
1513                         goto exit;
1514                 }
1515                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1516                 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1517                 comp_bufs[priv_data->orig_idx] = NULL;
1518         }
1519
1520         if ((out_of_space && oos_zlib_compress)
1521                         || (decompress_state == RTE_COMP_OP_STATEFUL)) {
1522                 ret_status = TEST_SUCCESS;
1523                 goto exit;
1524         }
1525
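             /*
              * The stateful path accumulated its output in all_decomp_data and
              * compared it against the original above, so only stateless runs
              * reach the per-mbuf comparison loop below.
              */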
1526         /*
1527          * Compare the original stream with the decompressed stream
1528          * (in size and the data)
1529          */
1530         for (i = 0; i < num_bufs; i++) {
1531                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1532                 const char *buf1 = test_data->use_external_mbufs ?
1533                                 test_data->inbuf_memzone->addr :
1534                                 test_bufs[priv_data->orig_idx];
1535                 const char *buf2;
1536                 data_size = test_data->use_external_mbufs ?
1537                                 test_data->inbuf_data_size :
1538                                 strlen(buf1) + 1;
1539                 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1540                 if (contig_buf == NULL) {
1541                         RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1542                                         "be allocated\n");
1543                         goto exit;
1544                 }
1545
1546                 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1547                                 ops_processed[i]->produced, contig_buf);
1548                 if (compare_buffers(buf1, data_size,
1549                                 buf2, ops_processed[i]->produced) < 0)
1550                         goto exit;
1551
1552                 /* Test checksums */
1553                 if (compress_xforms[0]->compress.chksum !=
1554                                 RTE_COMP_CHECKSUM_NONE) {
1555                         if (ops_processed[i]->output_chksum !=
1556                                         compress_checksum[i]) {
1557                                 RTE_LOG(ERR, USER1, "The checksums differ\n"
1558                         "Compression Checksum: %" PRIu64 "\tDecompression "
1559                         "Checksum: %" PRIu64 "\n", compress_checksum[i],
1560                         ops_processed[i]->output_chksum);
1561                                 goto exit;
1562                         }
1563                 }
1564
1565                 rte_free(contig_buf);
1566                 contig_buf = NULL;
1567         }
1568
1569         ret_status = TEST_SUCCESS;
1570
1571 exit:
1572         /* Free resources */
1573         for (i = 0; i < num_bufs; i++) {
1574                 rte_pktmbuf_free(uncomp_bufs[i]);
1575                 rte_pktmbuf_free(comp_bufs[i]);
1576                 rte_comp_op_free(ops[i]);
1577                 rte_comp_op_free(ops_processed[i]);
1578         }
1579         for (i = 0; i < num_priv_xforms; i++)
1580                 if (priv_xforms[i] != NULL)
1581                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
1582         if (stream != NULL)
1583                 rte_compressdev_stream_free(0, stream);
1584         if (all_decomp_data != NULL)
1585                 rte_free(all_decomp_data);
1586         rte_free(contig_buf);
1587
1588         return ret_status;
1589 }
1590
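     /*
      * The test cases below are thin wrappers around test_deflate_comp_decomp():
      * each fills in interim_data_params/test_data_params and usually runs the
      * helper twice, once compressing with compressdev and decompressing with
      * zlib, and once the other way around.
      */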
1591 static int
1592 test_compressdev_deflate_stateless_fixed(void)
1593 {
1594         struct comp_testsuite_params *ts_params = &testsuite_params;
1595         uint16_t i;
1596         int ret;
1597         const struct rte_compressdev_capabilities *capab;
1598
1599         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1600         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1601
1602         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1603                 return -ENOTSUP;
1604
1605         struct rte_comp_xform *compress_xform =
1606                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1607
1608         if (compress_xform == NULL) {
1609                 RTE_LOG(ERR, USER1,
1610                         "Compress xform could not be created\n");
1611                 ret = TEST_FAILED;
1612                 goto exit;
1613         }
1614
1615         memcpy(compress_xform, ts_params->def_comp_xform,
1616                         sizeof(struct rte_comp_xform));
1617         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1618
1619         struct interim_data_params int_data = {
1620                 NULL,
1621                 1,
1622                 NULL,
1623                 &compress_xform,
1624                 &ts_params->def_decomp_xform,
1625                 1
1626         };
1627
1628         struct test_data_params test_data = {
1629                 .compress_state = RTE_COMP_OP_STATELESS,
1630                 .decompress_state = RTE_COMP_OP_STATELESS,
1631                 .buff_type = LB_BOTH,
1632                 .zlib_dir = ZLIB_DECOMPRESS,
1633                 .out_of_space = 0,
1634                 .big_data = 0,
1635                 .overflow = OVERFLOW_DISABLED
1636         };
1637
1638         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1639                 int_data.test_bufs = &compress_test_bufs[i];
1640                 int_data.buf_idx = &i;
1641
1642                 /* Compress with compressdev, decompress with Zlib */
1643                 test_data.zlib_dir = ZLIB_DECOMPRESS;
1644                 ret = test_deflate_comp_decomp(&int_data, &test_data);
1645                 if (ret < 0)
1646                         goto exit;
1647
1648                 /* Compress with Zlib, decompress with compressdev */
1649                 test_data.zlib_dir = ZLIB_COMPRESS;
1650                 ret = test_deflate_comp_decomp(&int_data, &test_data);
1651                 if (ret < 0)
1652                         goto exit;
1653         }
1654
1655         ret = TEST_SUCCESS;
1656
1657 exit:
1658         rte_free(compress_xform);
1659         return ret;
1660 }
1661
1662 static int
1663 test_compressdev_deflate_stateless_dynamic(void)
1664 {
1665         struct comp_testsuite_params *ts_params = &testsuite_params;
1666         uint16_t i;
1667         int ret;
1668         const struct rte_compressdev_capabilities *capab;
1669
1670         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1671         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1672
1673         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1674                 return -ENOTSUP;
1675
1676         struct rte_comp_xform *compress_xform =
1677                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1678
1679         if (compress_xform == NULL) {
1680                 RTE_LOG(ERR, USER1,
1681                         "Compress xform could not be created\n");
1682                 ret = TEST_FAILED;
1683                 goto exit;
1684         }
1685
1686         memcpy(compress_xform, ts_params->def_comp_xform,
1687                         sizeof(struct rte_comp_xform));
1688         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1689
1690         struct interim_data_params int_data = {
1691                 NULL,
1692                 1,
1693                 NULL,
1694                 &compress_xform,
1695                 &ts_params->def_decomp_xform,
1696                 1
1697         };
1698
1699         struct test_data_params test_data = {
1700                 .compress_state = RTE_COMP_OP_STATELESS,
1701                 .decompress_state = RTE_COMP_OP_STATELESS,
1702                 .buff_type = LB_BOTH,
1703                 .zlib_dir = ZLIB_DECOMPRESS,
1704                 .out_of_space = 0,
1705                 .big_data = 0,
1706                 .overflow = OVERFLOW_DISABLED
1707         };
1708
1709         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1710                 int_data.test_bufs = &compress_test_bufs[i];
1711                 int_data.buf_idx = &i;
1712
1713                 /* Compress with compressdev, decompress with Zlib */
1714                 test_data.zlib_dir = ZLIB_DECOMPRESS;
1715                 ret = test_deflate_comp_decomp(&int_data, &test_data);
1716                 if (ret < 0)
1717                         goto exit;
1718
1719                 /* Compress with Zlib, decompress with compressdev */
1720                 test_data.zlib_dir = ZLIB_COMPRESS;
1721                 ret = test_deflate_comp_decomp(&int_data, &test_data);
1722                 if (ret < 0)
1723                         goto exit;
1724         }
1725
1726         ret = TEST_SUCCESS;
1727
1728 exit:
1729         rte_free(compress_xform);
1730         return ret;
1731 }
1732
1733 static int
1734 test_compressdev_deflate_stateless_multi_op(void)
1735 {
1736         struct comp_testsuite_params *ts_params = &testsuite_params;
1737         uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1738         uint16_t buf_idx[num_bufs];
1739         uint16_t i;
1740         int ret;
1741
1742         for (i = 0; i < num_bufs; i++)
1743                 buf_idx[i] = i;
1744
1745         struct interim_data_params int_data = {
1746                 compress_test_bufs,
1747                 num_bufs,
1748                 buf_idx,
1749                 &ts_params->def_comp_xform,
1750                 &ts_params->def_decomp_xform,
1751                 1
1752         };
1753
1754         struct test_data_params test_data = {
1755                 .compress_state = RTE_COMP_OP_STATELESS,
1756                 .decompress_state = RTE_COMP_OP_STATELESS,
1757                 .buff_type = LB_BOTH,
1758                 .zlib_dir = ZLIB_DECOMPRESS,
1759                 .out_of_space = 0,
1760                 .big_data = 0,
1761                 .overflow = OVERFLOW_DISABLED
1762         };
1763
1764         /* Compress with compressdev, decompress with Zlib */
1765         test_data.zlib_dir = ZLIB_DECOMPRESS;
1766         ret = test_deflate_comp_decomp(&int_data, &test_data);
1767         if (ret < 0)
1768                 return ret;
1769
1770         /* Compress with Zlib, decompress with compressdev */
1771         test_data.zlib_dir = ZLIB_COMPRESS;
1772         ret = test_deflate_comp_decomp(&int_data, &test_data);
1773         if (ret < 0)
1774                 return ret;
1775
1776         return TEST_SUCCESS;
1777 }
1778
1779 static int
1780 test_compressdev_deflate_stateless_multi_level(void)
1781 {
1782         struct comp_testsuite_params *ts_params = &testsuite_params;
1783         unsigned int level;
1784         uint16_t i;
1785         int ret;
1786         struct rte_comp_xform *compress_xform =
1787                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1788
1789         if (compress_xform == NULL) {
1790                 RTE_LOG(ERR, USER1,
1791                         "Compress xform could not be created\n");
1792                 ret = TEST_FAILED;
1793                 goto exit;
1794         }
1795
1796         memcpy(compress_xform, ts_params->def_comp_xform,
1797                         sizeof(struct rte_comp_xform));
1798
1799         struct interim_data_params int_data = {
1800                 NULL,
1801                 1,
1802                 NULL,
1803                 &compress_xform,
1804                 &ts_params->def_decomp_xform,
1805                 1
1806         };
1807
1808         struct test_data_params test_data = {
1809                 .compress_state = RTE_COMP_OP_STATELESS,
1810                 .decompress_state = RTE_COMP_OP_STATELESS,
1811                 .buff_type = LB_BOTH,
1812                 .zlib_dir = ZLIB_DECOMPRESS,
1813                 .out_of_space = 0,
1814                 .big_data = 0,
1815                 .overflow = OVERFLOW_DISABLED
1816         };
1817
1818         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1819                 int_data.test_bufs = &compress_test_bufs[i];
1820                 int_data.buf_idx = &i;
1821
1822                 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1823                                 level++) {
1824                         compress_xform->compress.level = level;
1825                         /* Compress with compressdev, decompress with Zlib */
1826                         test_data.zlib_dir = ZLIB_DECOMPRESS;
1827                         ret = test_deflate_comp_decomp(&int_data, &test_data);
1828                         if (ret < 0)
1829                                 goto exit;
1830                 }
1831         }
1832
1833         ret = TEST_SUCCESS;
1834
1835 exit:
1836         rte_free(compress_xform);
1837         return ret;
1838 }
1839
1840 #define NUM_XFORMS 3
1841 static int
1842 test_compressdev_deflate_stateless_multi_xform(void)
1843 {
1844         struct comp_testsuite_params *ts_params = &testsuite_params;
1845         uint16_t num_bufs = NUM_XFORMS;
1846         struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1847         struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1848         const char *test_buffers[NUM_XFORMS];
1849         uint16_t i;
1850         unsigned int level = RTE_COMP_LEVEL_MIN;
1851         uint16_t buf_idx[num_bufs];
1852         int ret;
1853
1854         /* Create multiple xforms with various levels */
1855         for (i = 0; i < NUM_XFORMS; i++) {
1856                 compress_xforms[i] = rte_malloc(NULL,
1857                                 sizeof(struct rte_comp_xform), 0);
1858                 if (compress_xforms[i] == NULL) {
1859                         RTE_LOG(ERR, USER1,
1860                                 "Compress xform could not be created\n");
1861                         ret = TEST_FAILED;
1862                         goto exit;
1863                 }
1864
1865                 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1866                                 sizeof(struct rte_comp_xform));
1867                 compress_xforms[i]->compress.level = level;
1868                 level++;
1869
1870                 decompress_xforms[i] = rte_malloc(NULL,
1871                                 sizeof(struct rte_comp_xform), 0);
1872                 if (decompress_xforms[i] == NULL) {
1873                         RTE_LOG(ERR, USER1,
1874                                 "Decompress xform could not be created\n");
1875                         ret = TEST_FAILED;
1876                         goto exit;
1877                 }
1878
1879                 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1880                                 sizeof(struct rte_comp_xform));
1881         }
1882
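             /*
              * The helper maps ops to xforms via orig_idx % num_xforms (see the
              * xform_idx computations above), so each of the three levels
              * configured here should be exercised against the same input.
              */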
1883         for (i = 0; i < NUM_XFORMS; i++) {
1884                 buf_idx[i] = 0;
1885                 /* Use the same buffer in all sessions */
1886                 test_buffers[i] = compress_test_bufs[0];
1887         }
1888
1889         struct interim_data_params int_data = {
1890                 test_buffers,
1891                 num_bufs,
1892                 buf_idx,
1893                 compress_xforms,
1894                 decompress_xforms,
1895                 NUM_XFORMS
1896         };
1897
1898         struct test_data_params test_data = {
1899                 .compress_state = RTE_COMP_OP_STATELESS,
1900                 .decompress_state = RTE_COMP_OP_STATELESS,
1901                 .buff_type = LB_BOTH,
1902                 .zlib_dir = ZLIB_DECOMPRESS,
1903                 .out_of_space = 0,
1904                 .big_data = 0,
1905                 .overflow = OVERFLOW_DISABLED
1906         };
1907
1908         /* Compress with compressdev, decompress with Zlib */
1909         ret = test_deflate_comp_decomp(&int_data, &test_data);
1910         if (ret < 0)
1911                 goto exit;
1912
1913         ret = TEST_SUCCESS;
1914
1915 exit:
1916         for (i = 0; i < NUM_XFORMS; i++) {
1917                 rte_free(compress_xforms[i]);
1918                 rte_free(decompress_xforms[i]);
1919         }
1920
1921         return ret;
1922 }
1923
1924 static int
1925 test_compressdev_deflate_stateless_sgl(void)
1926 {
1927         struct comp_testsuite_params *ts_params = &testsuite_params;
1928         uint16_t i;
1929         int ret;
1930         const struct rte_compressdev_capabilities *capab;
1931
1932         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1933         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1934
1935         if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1936                 return -ENOTSUP;
1937
1938         struct interim_data_params int_data = {
1939                 NULL,
1940                 1,
1941                 NULL,
1942                 &ts_params->def_comp_xform,
1943                 &ts_params->def_decomp_xform,
1944                 1
1945         };
1946
1947         struct test_data_params test_data = {
1948                 .compress_state = RTE_COMP_OP_STATELESS,
1949                 .decompress_state = RTE_COMP_OP_STATELESS,
1950                 .buff_type = SGL_BOTH,
1951                 .zlib_dir = ZLIB_DECOMPRESS,
1952                 .out_of_space = 0,
1953                 .big_data = 0,
1954                 .overflow = OVERFLOW_DISABLED
1955         };
1956
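             /*
              * Besides SGL in and out, the loop below also covers SGL_TO_LB
              * and LB_TO_SGL whenever the PMD advertises the matching OOP
              * feature flags.
              */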
1957         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1958                 int_data.test_bufs = &compress_test_bufs[i];
1959                 int_data.buf_idx = &i;
1960
1961                 /* Compress with compressdev, decompress with Zlib */
1962                 test_data.zlib_dir = ZLIB_DECOMPRESS;
1963                 ret = test_deflate_comp_decomp(&int_data, &test_data);
1964                 if (ret < 0)
1965                         return ret;
1966
1967                 /* Compress with Zlib, decompress with compressdev */
1968                 test_data.zlib_dir = ZLIB_COMPRESS;
1969                 ret = test_deflate_comp_decomp(&int_data, &test_data);
1970                 if (ret < 0)
1971                         return ret;
1972
1973                 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1974                         /* Compress with compressdev, decompress with Zlib */
1975                         test_data.zlib_dir = ZLIB_DECOMPRESS;
1976                         test_data.buff_type = SGL_TO_LB;
1977                         ret = test_deflate_comp_decomp(&int_data, &test_data);
1978                         if (ret < 0)
1979                                 return ret;
1980
1981                         /* Compress with Zlib, decompress with compressdev */
1982                         test_data.zlib_dir = ZLIB_COMPRESS;
1983                         test_data.buff_type = SGL_TO_LB;
1984                         ret = test_deflate_comp_decomp(&int_data, &test_data);
1985                         if (ret < 0)
1986                                 return ret;
1987                 }
1988
1989                 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1990                         /* Compress with compressdev, decompress with Zlib */
1991                         test_data.zlib_dir = ZLIB_DECOMPRESS;
1992                         test_data.buff_type = LB_TO_SGL;
1993                         ret = test_deflate_comp_decomp(&int_data, &test_data);
1994                         if (ret < 0)
1995                                 return ret;
1996
1997                         /* Compress with Zlib, decompress with compressdev */
1998                         test_data.zlib_dir = ZLIB_COMPRESS;
1999                         test_data.buff_type = LB_TO_SGL;
2000                         ret = test_deflate_comp_decomp(&int_data, &test_data);
2001                         if (ret < 0)
2002                                 return ret;
2003                 }
2004         }
2005
2006         return TEST_SUCCESS;
2007 }
2008
2009 static int
2010 test_compressdev_deflate_stateless_checksum(void)
2011 {
2012         struct comp_testsuite_params *ts_params = &testsuite_params;
2013         uint16_t i;
2014         int ret;
2015         const struct rte_compressdev_capabilities *capab;
2016
2017         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2018         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2019
2020         /* Check if driver supports any checksum */
2021         if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
2022                         (capab->comp_feature_flags &
2023                         RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
2024                         (capab->comp_feature_flags &
2025                         RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
2026                 return -ENOTSUP;
2027
2028         struct rte_comp_xform *compress_xform =
2029                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2030         if (compress_xform == NULL) {
2031                 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2032                 return TEST_FAILED;
2033         }
2034
2035         memcpy(compress_xform, ts_params->def_comp_xform,
2036                         sizeof(struct rte_comp_xform));
2037
2038         struct rte_comp_xform *decompress_xform =
2039                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2040         if (decompress_xform == NULL) {
2041                 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2042                 rte_free(compress_xform);
2043                 return TEST_FAILED;
2044         }
2045
2046         memcpy(decompress_xform, ts_params->def_decomp_xform,
2047                         sizeof(struct rte_comp_xform));
2048
2049         struct interim_data_params int_data = {
2050                 NULL,
2051                 1,
2052                 NULL,
2053                 &compress_xform,
2054                 &decompress_xform,
2055                 1
2056         };
2057
2058         struct test_data_params test_data = {
2059                 .compress_state = RTE_COMP_OP_STATELESS,
2060                 .decompress_state = RTE_COMP_OP_STATELESS,
2061                 .buff_type = LB_BOTH,
2062                 .zlib_dir = ZLIB_DECOMPRESS,
2063                 .out_of_space = 0,
2064                 .big_data = 0,
2065                 .overflow = OVERFLOW_DISABLED
2066         };
2067
2068         /* Check if driver supports crc32 checksum and test */
2069         if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
2070                 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2071                 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2072
2073                 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2074                         /* Compress with compressdev, decompress with Zlib */
2075                         int_data.test_bufs = &compress_test_bufs[i];
2076                         int_data.buf_idx = &i;
2077
2078                         /* Generate zlib checksum and test against selected
2079                          * driver's decompression checksum
2080                          */
2081                         test_data.zlib_dir = ZLIB_COMPRESS;
2082                         ret = test_deflate_comp_decomp(&int_data, &test_data);
2083                         if (ret < 0)
2084                                 goto exit;
2085
2086                         /* Generate compression and decompression
2087                          * checksum of selected driver
2088                          */
2089                         test_data.zlib_dir = ZLIB_NONE;
2090                         ret = test_deflate_comp_decomp(&int_data, &test_data);
2091                         if (ret < 0)
2092                                 goto exit;
2093                 }
2094         }
2095
2096         /* Check if driver supports adler32 checksum and test */
2097         if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
2098                 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2099                 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2100
2101                 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2102                         int_data.test_bufs = &compress_test_bufs[i];
2103                         int_data.buf_idx = &i;
2104
2105                         /* Generate zlib checksum and test against selected
2106                          * driver's decompression checksum
2107                          */
2108                         test_data.zlib_dir = ZLIB_COMPRESS;
2109                         ret = test_deflate_comp_decomp(&int_data, &test_data);
2110                         if (ret < 0)
2111                                 goto exit;
2112                         /* Generate compression and decompression
2113                          * checksum of selected driver
2114                          */
2115                         test_data.zlib_dir = ZLIB_NONE;
2116                         ret = test_deflate_comp_decomp(&int_data, &test_data);
2117                         if (ret < 0)
2118                                 goto exit;
2119                 }
2120         }
2121
2122         /* Check if driver supports combined crc and adler checksum and test */
2123         if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
2124                 compress_xform->compress.chksum =
2125                                 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2126                 decompress_xform->decompress.chksum =
2127                                 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2128
2129                 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2130                         int_data.test_bufs = &compress_test_bufs[i];
2131                         int_data.buf_idx = &i;
2132
2133                         /* Generate compression and decompression
2134                          * checksum of selected driver
2135                          */
2136                         test_data.zlib_dir = ZLIB_NONE;
2137                         ret = test_deflate_comp_decomp(&int_data, &test_data);
2138                         if (ret < 0)
2139                                 goto exit;
2140                 }
2141         }
2142
2143         ret = TEST_SUCCESS;
2144
2145 exit:
2146         rte_free(compress_xform);
2147         rte_free(decompress_xform);
2148         return ret;
2149 }
2150
2151 static int
2152 test_compressdev_out_of_space_buffer(void)
2153 {
2154         struct comp_testsuite_params *ts_params = &testsuite_params;
2155         int ret;
2156         uint16_t i;
2157         const struct rte_compressdev_capabilities *capab;
2158
2159         RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
2160
2161         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2162         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2163
2164         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2165                 return -ENOTSUP;
2166
2167         struct interim_data_params int_data = {
2168                 &compress_test_bufs[0],
2169                 1,
2170                 &i,
2171                 &ts_params->def_comp_xform,
2172                 &ts_params->def_decomp_xform,
2173                 1
2174         };
2175
2176         struct test_data_params test_data = {
2177                 .compress_state = RTE_COMP_OP_STATELESS,
2178                 .decompress_state = RTE_COMP_OP_STATELESS,
2179                 .buff_type = LB_BOTH,
2180                 .zlib_dir = ZLIB_DECOMPRESS,
2181                 .out_of_space = 1,  /* run out-of-space test */
2182                 .big_data = 0,
2183                 .overflow = OVERFLOW_DISABLED
2184         };
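             /*
              * With out_of_space set, the harness gives the compressdev stage
              * only OUT_OF_SPACE_BUF bytes of destination space and expects
              * RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED rather than success.
              */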
2185         /* Compress with compressdev, decompress with Zlib */
2186         test_data.zlib_dir = ZLIB_DECOMPRESS;
2187         ret = test_deflate_comp_decomp(&int_data, &test_data);
2188         if (ret < 0)
2189                 goto exit;
2190
2191         /* Compress with Zlib, decompress with compressdev */
2192         test_data.zlib_dir = ZLIB_COMPRESS;
2193         ret = test_deflate_comp_decomp(&int_data, &test_data);
2194         if (ret < 0)
2195                 goto exit;
2196
2197         if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2198                 /* Compress with compressdev, decompress with Zlib */
2199                 test_data.zlib_dir = ZLIB_DECOMPRESS;
2200                 test_data.buff_type = SGL_BOTH;
2201                 ret = test_deflate_comp_decomp(&int_data, &test_data);
2202                 if (ret < 0)
2203                         goto exit;
2204
2205                 /* Compress with Zlib, decompress with compressdev */
2206                 test_data.zlib_dir = ZLIB_COMPRESS;
2207                 test_data.buff_type = SGL_BOTH;
2208                 ret = test_deflate_comp_decomp(&int_data, &test_data);
2209                 if (ret < 0)
2210                         goto exit;
2211         }
2212
2213         ret  = TEST_SUCCESS;
2214
2215 exit:
2216         return ret;
2217 }
2218
2219 static int
2220 test_compressdev_deflate_stateless_dynamic_big(void)
2221 {
2222         struct comp_testsuite_params *ts_params = &testsuite_params;
2223         uint16_t i = 0;
2224         int ret;
2225         int j;
2226         const struct rte_compressdev_capabilities *capab;
2227         char *test_buffer = NULL;
2228
2229         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2230         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2231
2232         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2233                 return -ENOTSUP;
2234
2235         if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2236                 return -ENOTSUP;
2237
2238         test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
2239         if (test_buffer == NULL) {
2240                 RTE_LOG(ERR, USER1,
2241                         "Can't allocate buffer for big-data\n");
2242                 return TEST_FAILED;
2243         }
2244
2245         struct interim_data_params int_data = {
2246                 (const char * const *)&test_buffer,
2247                 1,
2248                 &i,
2249                 &ts_params->def_comp_xform,
2250                 &ts_params->def_decomp_xform,
2251                 1
2252         };
2253
2254         struct test_data_params test_data = {
2255                 .compress_state = RTE_COMP_OP_STATELESS,
2256                 .decompress_state = RTE_COMP_OP_STATELESS,
2257                 .buff_type = SGL_BOTH,
2258                 .zlib_dir = ZLIB_DECOMPRESS,
2259                 .out_of_space = 0,
2260                 .big_data = 1,
2261                 .overflow = OVERFLOW_DISABLED
2262         };
2263
2264         ts_params->def_comp_xform->compress.deflate.huffman =
2265                                                 RTE_COMP_HUFFMAN_DYNAMIC;
2266
2267         /* fill the buffer with pseudo-random data */
2268         srand(BIG_DATA_TEST_SIZE);
2269         for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2270                 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2271         test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
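             /*
              * Every byte is forced non-zero ("| 1") and the buffer is
              * NUL-terminated, since the harness sizes and compares test
              * buffers with strlen().
              */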
2272
2273         /* Compress with compressdev, decompress with Zlib */
2274         test_data.zlib_dir = ZLIB_DECOMPRESS;
2275         ret = test_deflate_comp_decomp(&int_data, &test_data);
2276         if (ret < 0)
2277                 goto exit;
2278
2279         /* Compress with Zlib, decompress with compressdev */
2280         test_data.zlib_dir = ZLIB_COMPRESS;
2281         ret = test_deflate_comp_decomp(&int_data, &test_data);
2282         if (ret < 0)
2283                 goto exit;
2284
2285         ret = TEST_SUCCESS;
2286
2287 exit:
2288         ts_params->def_comp_xform->compress.deflate.huffman =
2289                                                 RTE_COMP_HUFFMAN_DEFAULT;
2290         rte_free(test_buffer);
2291         return ret;
2292 }
2293
2294 static int
2295 test_compressdev_deflate_stateful_decomp(void)
2296 {
2297         struct comp_testsuite_params *ts_params = &testsuite_params;
2298         int ret;
2299         uint16_t i;
2300         const struct rte_compressdev_capabilities *capab;
2301
2302         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2303         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2304
2305         if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2306                 return -ENOTSUP;
2307
2308         struct interim_data_params int_data = {
2309                 &compress_test_bufs[0],
2310                 1,
2311                 &i,
2312                 &ts_params->def_comp_xform,
2313                 &ts_params->def_decomp_xform,
2314                 1
2315         };
2316
2317         struct test_data_params test_data = {
2318                 .compress_state = RTE_COMP_OP_STATELESS,
2319                 .decompress_state = RTE_COMP_OP_STATEFUL,
2320                 .buff_type = LB_BOTH,
2321                 .zlib_dir = ZLIB_COMPRESS,
2322                 .out_of_space = 0,
2323                 .big_data = 0,
2324                 .decompress_output_block_size = 2000,
2325                 .decompress_steps_max = 4,
2326                 .overflow = OVERFLOW_DISABLED
2327         };
2328
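             /*
              * decompress_output_block_size limits each stateful step to a
              * 2000-byte output block, and decompress_steps_max bounds how many
              * times an operation may be resubmitted via next_step.
              */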
2329         /* Compress with Zlib, decompress with compressdev */
2330         if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2331                 ret = TEST_FAILED;
2332                 goto exit;
2333         }
2334
2335         if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2336                 /* Now test with SGL buffers */
2337                 test_data.buff_type = SGL_BOTH;
2338                 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2339                         ret = TEST_FAILED;
2340                         goto exit;
2341                 }
2342         }
2343
2344         ret  = TEST_SUCCESS;
2345
2346 exit:
2347         return ret;
2348 }
2349
2350 static int
2351 test_compressdev_deflate_stateful_decomp_checksum(void)
2352 {
2353         struct comp_testsuite_params *ts_params = &testsuite_params;
2354         int ret;
2355         uint16_t i;
2356         const struct rte_compressdev_capabilities *capab;
2357
2358         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2359         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2360
2361         if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2362                 return -ENOTSUP;
2363
2364         /* Check if driver supports any checksum */
2365         if (!(capab->comp_feature_flags &
2366              (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
2367               RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
2368                 return -ENOTSUP;
2369
2370         struct rte_comp_xform *compress_xform =
2371                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2372         if (compress_xform == NULL) {
2373                 RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2374                 return TEST_FAILED;
2375         }
2376
2377         memcpy(compress_xform, ts_params->def_comp_xform,
2378                sizeof(struct rte_comp_xform));
2379
2380         struct rte_comp_xform *decompress_xform =
2381                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2382         if (decompress_xform == NULL) {
2383                 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2384                 rte_free(compress_xform);
2385                 return TEST_FAILED;
2386         }
2387
2388         memcpy(decompress_xform, ts_params->def_decomp_xform,
2389                sizeof(struct rte_comp_xform));
2390
2391         struct interim_data_params int_data = {
2392                 &compress_test_bufs[0],
2393                 1,
2394                 &i,
2395                 &compress_xform,
2396                 &decompress_xform,
2397                 1
2398         };
2399
2400         struct test_data_params test_data = {
2401                 .compress_state = RTE_COMP_OP_STATELESS,
2402                 .decompress_state = RTE_COMP_OP_STATEFUL,
2403                 .buff_type = LB_BOTH,
2404                 .zlib_dir = ZLIB_COMPRESS,
2405                 .out_of_space = 0,
2406                 .big_data = 0,
2407                 .decompress_output_block_size = 2000,
2408                 .decompress_steps_max = 4,
2409                 .overflow = OVERFLOW_DISABLED
2410         };
2411
2412         /* Check if driver supports crc32 checksum and test */
2413         if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
2414                 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2415                 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2416                 /* Compress with Zlib, decompress with compressdev */
2417                 test_data.buff_type = LB_BOTH;
2418                 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2419                         ret = TEST_FAILED;
2420                         goto exit;
2421                 }
2422                 if (capab->comp_feature_flags &
2423                                 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2424                         /* Now test with SGL buffers */
2425                         test_data.buff_type = SGL_BOTH;
2426                         if (test_deflate_comp_decomp(&int_data,
2427                                                      &test_data) < 0) {
2428                                 ret = TEST_FAILED;
2429                                 goto exit;
2430                         }
2431                 }
2432         }
2433
2434         /* Check if driver supports adler32 checksum and test */
2435         if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
2436                 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2437                 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2438                 /* Compress with Zlib, decompress with compressdev */
2439                 test_data.buff_type = LB_BOTH;
2440                 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2441                         ret = TEST_FAILED;
2442                         goto exit;
2443                 }
2444                 if (capab->comp_feature_flags &
2445                                 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2446                         /* Now test with SGL buffers */
2447                         test_data.buff_type = SGL_BOTH;
2448                         if (test_deflate_comp_decomp(&int_data,
2449                                                      &test_data) < 0) {
2450                                 ret = TEST_FAILED;
2451                                 goto exit;
2452                         }
2453                 }
2454         }
2455
2456         /* Check if driver supports combined crc and adler checksum and test */
2457         if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
2458                 compress_xform->compress.chksum =
2459                                 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2460                 decompress_xform->decompress.chksum =
2461                                 RTE_COMP_CHECKSUM_CRC32_ADLER32;
2462                 /* Zlib doesn't support combined checksum */
2463                 test_data.zlib_dir = ZLIB_NONE;
2464                 /* Compress stateless, decompress stateful with compressdev */
2465                 test_data.buff_type = LB_BOTH;
2466                 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2467                         ret = TEST_FAILED;
2468                         goto exit;
2469                 }
2470                 if (capab->comp_feature_flags &
2471                                 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2472                         /* Now test with SGL buffers */
2473                         test_data.buff_type = SGL_BOTH;
2474                         if (test_deflate_comp_decomp(&int_data,
2475                                                      &test_data) < 0) {
2476                                 ret = TEST_FAILED;
2477                                 goto exit;
2478                         }
2479                 }
2480         }
2481
2482         ret  = TEST_SUCCESS;
2483
2484 exit:
2485         rte_free(compress_xform);
2486         rte_free(decompress_xform);
2487         return ret;
2488 }
2489
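     /*
      * Reserve an IOVA-contiguous memzone (reusing an existing one only if its
      * size matches); the external-mbuf test below passes these zones to the
      * harness as the input, compressed and decompressed data areas.
      */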
2490 static const struct rte_memzone *
2491 make_memzone(const char *name, size_t size)
2492 {
2493         unsigned int socket_id = rte_socket_id();
2494         char mz_name[RTE_MEMZONE_NAMESIZE];
2495         const struct rte_memzone *memzone;
2496
2497         snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id);
2498         memzone = rte_memzone_lookup(mz_name);
2499         if (memzone != NULL && memzone->len != size) {
2500                 rte_memzone_free(memzone);
2501                 memzone = NULL;
2502         }
2503         if (memzone == NULL) {
2504                 memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id,
2505                                 RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
2506                 if (memzone == NULL)
2507                         RTE_LOG(ERR, USER1, "Can't allocate memory zone %s\n",
2508                                 mz_name);
2509         }
2510         return memzone;
2511 }
2512
2513 static int
2514 test_compressdev_external_mbufs(void)
2515 {
2516         struct comp_testsuite_params *ts_params = &testsuite_params;
2517         size_t data_len = 0;
2518         uint16_t i;
2519         int ret = TEST_FAILED;
2520
2521         for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
2522                 data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1);
2523
2524         struct interim_data_params int_data = {
2525                 NULL,
2526                 1,
2527                 NULL,
2528                 &ts_params->def_comp_xform,
2529                 &ts_params->def_decomp_xform,
2530                 1
2531         };
2532
2533         struct test_data_params test_data = {
2534                 .compress_state = RTE_COMP_OP_STATELESS,
2535                 .decompress_state = RTE_COMP_OP_STATELESS,
2536                 .buff_type = LB_BOTH,
2537                 .zlib_dir = ZLIB_DECOMPRESS,
2538                 .out_of_space = 0,
2539                 .big_data = 0,
2540                 .use_external_mbufs = 1,
2541                 .inbuf_data_size = data_len,
2542                 .inbuf_memzone = make_memzone("inbuf", data_len),
2543                 .compbuf_memzone = make_memzone("compbuf", data_len *
2544                                                 COMPRESS_BUF_SIZE_RATIO),
2545                 .uncompbuf_memzone = make_memzone("decompbuf", data_len),
2546                 .overflow = OVERFLOW_DISABLED
2547         };
2548
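        /*
         * Note: the memzones above are handed to the common test helper,
         * which presumably attaches them to the mbufs as external buffers
         * (e.g. via rte_pktmbuf_attach_extbuf()); only the zone pointers and
         * the input length are set up here.
         */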
2549         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2550                 /* Prepare input data */
2551                 data_len = strlen(compress_test_bufs[i]) + 1;
2552                 rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i],
2553                            data_len);
2554                 test_data.inbuf_data_size = data_len;
2555                 int_data.buf_idx = &i;
2556
2557                 /* Compress with compressdev, decompress with Zlib */
2558                 test_data.zlib_dir = ZLIB_DECOMPRESS;
2559                 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
2560                         goto exit;
2561
2562                 /* Compress with Zlib, decompress with compressdev */
2563                 test_data.zlib_dir = ZLIB_COMPRESS;
2564                 if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
2565                         goto exit;
2566         }
2567
2568         ret = TEST_SUCCESS;
2569
2570 exit:
2571         rte_memzone_free(test_data.inbuf_memzone);
2572         rte_memzone_free(test_data.compbuf_memzone);
2573         rte_memzone_free(test_data.uncompbuf_memzone);
2574         return ret;
2575 }
2576
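/*
 * Stateless fixed-Huffman test run with overflow handling enabled
 * (OVERFLOW_ENABLED), so the helper can provoke an out-of-space condition
 * and check that it is reported as recoverable. A positive return from
 * test_deflate_comp_decomp() is treated as "feature not supported" and
 * mapped to -ENOTSUP instead of a test failure.
 */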
2577 static int
2578 test_compressdev_deflate_stateless_fixed_oos_recoverable(void)
2579 {
2580         struct comp_testsuite_params *ts_params = &testsuite_params;
2581         uint16_t i;
2582         int ret;
2583         int comp_result;
2584         const struct rte_compressdev_capabilities *capab;
2585
2586         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2587         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2588
2589         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2590                 return -ENOTSUP;
2591
2592         struct rte_comp_xform *compress_xform =
2593                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2594
2595         if (compress_xform == NULL) {
2596                 RTE_LOG(ERR, USER1,
2597                         "Compress xform could not be created\n");
2598                 ret = TEST_FAILED;
2599                 goto exit;
2600         }
2601
2602         memcpy(compress_xform, ts_params->def_comp_xform,
2603                         sizeof(struct rte_comp_xform));
2604         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
2605
2606         struct interim_data_params int_data = {
2607                 NULL,
2608                 1,
2609                 NULL,
2610                 &compress_xform,
2611                 &ts_params->def_decomp_xform,
2612                 1
2613         };
2614
2615         struct test_data_params test_data = {
2616                 .compress_state = RTE_COMP_OP_STATELESS,
2617                 .decompress_state = RTE_COMP_OP_STATELESS,
2618                 .buff_type = LB_BOTH,
2619                 .zlib_dir = ZLIB_DECOMPRESS,
2620                 .out_of_space = 0,
2621                 .big_data = 0,
2622                 .overflow = OVERFLOW_ENABLED
2623         };
2624
2625         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2626                 int_data.test_bufs = &compress_test_bufs[i];
2627                 int_data.buf_idx = &i;
2628
2629                 /* Compress with compressdev, decompress with Zlib */
2630                 test_data.zlib_dir = ZLIB_DECOMPRESS;
2631                 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
2632                 if (comp_result < 0) {
2633                         ret = TEST_FAILED;
2634                         goto exit;
2635                 } else if (comp_result > 0) {
2636                         ret = -ENOTSUP;
2637                         goto exit;
2638                 }
2639
2640                 /* Compress with Zlib, decompress with compressdev */
2641                 test_data.zlib_dir = ZLIB_COMPRESS;
2642                 comp_result = test_deflate_comp_decomp(&int_data, &test_data);
2643                 if (comp_result < 0) {
2644                         ret = TEST_FAILED;
2645                         goto exit;
2646                 } else if (comp_result > 0) {
2647                         ret = -ENOTSUP;
2648                         goto exit;
2649                 }
2650         }
2651
2652         ret = TEST_SUCCESS;
2653
2654 exit:
2655         rte_free(compress_xform);
2656         return ret;
2657 }
2658
2659 static struct unit_test_suite compressdev_testsuite  = {
2660         .suite_name = "compressdev unit test suite",
2661         .setup = testsuite_setup,
2662         .teardown = testsuite_teardown,
2663         .unit_test_cases = {
2664                 TEST_CASE_ST(NULL, NULL,
2665                         test_compressdev_invalid_configuration),
2666                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2667                         test_compressdev_deflate_stateless_fixed),
2668                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2669                         test_compressdev_deflate_stateless_dynamic),
2670                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2671                         test_compressdev_deflate_stateless_dynamic_big),
2672                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2673                         test_compressdev_deflate_stateless_multi_op),
2674                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2675                         test_compressdev_deflate_stateless_multi_level),
2676                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2677                         test_compressdev_deflate_stateless_multi_xform),
2678                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2679                         test_compressdev_deflate_stateless_sgl),
2680                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2681                         test_compressdev_deflate_stateless_checksum),
2682                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2683                         test_compressdev_out_of_space_buffer),
2684                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2685                         test_compressdev_deflate_stateful_decomp),
2686                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2687                         test_compressdev_deflate_stateful_decomp_checksum),
2688                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2689                         test_compressdev_external_mbufs),
2690                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2691                 test_compressdev_deflate_stateless_fixed_oos_recoverable),
2692                 TEST_CASES_END() /**< NULL terminate unit test array */
2693         }
2694 };
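
/*
 * New tests are added by inserting another TEST_CASE_ST() entry before
 * TEST_CASES_END() above, e.g. (hypothetical case name):
 *
 *     TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
 *             test_compressdev_deflate_my_new_case),
 */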
2695
2696 static int
2697 test_compressdev(void)
2698 {
2699         return unit_test_suite_runner(&compressdev_testsuite);
2700 }
2701
2702 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
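
/*
 * The suite is exposed as the "compressdev_autotest" command; it can
 * typically be run from the DPDK test application, e.g. by entering
 * "compressdev_autotest" at the RTE>> prompt, provided a compress PMD and
 * zlib are available in the build.
 */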