test/compress: improve debug trace setup
[dpdk.git] / test / test / test_compressdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
11 #include <rte_mbuf.h>
12 #include <rte_compressdev.h>
13 #include <rte_string_fns.h>
14
15 #include "test_compressdev_test_buffer.h"
16 #include "test.h"
17
18 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
19
20 #define DEFAULT_WINDOW_SIZE 15
21 #define DEFAULT_MEM_LEVEL 8
22 #define MAX_DEQD_RETRIES 10
23 #define DEQUEUE_WAIT_TIME 10000
24
25 /*
26  * 30% extra size for compressed data compared to original data,
27  * in case data size cannot be reduced and it is actually bigger
28  * due to the compress block headers
29  */
30 #define COMPRESS_BUF_SIZE_RATIO 1.3
31 #define NUM_LARGE_MBUFS 16
32 #define SMALL_SEG_SIZE 256
33 #define MAX_SEGS 16
34 #define NUM_OPS 16
35 #define NUM_MAX_XFORMS 16
36 #define NUM_MAX_INFLIGHT_OPS 128
37 #define CACHE_SIZE 0
38
39 const char *
40 huffman_type_strings[] = {
41         [RTE_COMP_HUFFMAN_DEFAULT]      = "PMD default",
42         [RTE_COMP_HUFFMAN_FIXED]        = "Fixed",
43         [RTE_COMP_HUFFMAN_DYNAMIC]      = "Dynamic"
44 };
45
46 enum zlib_direction {
47         ZLIB_NONE,
48         ZLIB_COMPRESS,
49         ZLIB_DECOMPRESS,
50         ZLIB_ALL
51 };
52
53 struct priv_op_data {
54         uint16_t orig_idx;
55 };
56
57 struct comp_testsuite_params {
58         struct rte_mempool *large_mbuf_pool;
59         struct rte_mempool *small_mbuf_pool;
60         struct rte_mempool *op_pool;
61         struct rte_comp_xform *def_comp_xform;
62         struct rte_comp_xform *def_decomp_xform;
63 };
64
65 static struct comp_testsuite_params testsuite_params = { 0 };
66
67 static void
68 testsuite_teardown(void)
69 {
70         struct comp_testsuite_params *ts_params = &testsuite_params;
71
72         rte_mempool_free(ts_params->large_mbuf_pool);
73         rte_mempool_free(ts_params->small_mbuf_pool);
74         rte_mempool_free(ts_params->op_pool);
75         rte_free(ts_params->def_comp_xform);
76         rte_free(ts_params->def_decomp_xform);
77 }
78
79 static int
80 testsuite_setup(void)
81 {
82         struct comp_testsuite_params *ts_params = &testsuite_params;
83         uint32_t max_buf_size = 0;
84         unsigned int i;
85
86         if (rte_compressdev_count() == 0) {
87                 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
88                 return TEST_FAILED;
89         }
90
91         RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
92                                 rte_compressdev_name_get(0));
93
94         for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
95                 max_buf_size = RTE_MAX(max_buf_size,
96                                 strlen(compress_test_bufs[i]) + 1);
97
98         /*
99          * Buffers to be used in compression and decompression.
100          * Since decompressed data might be larger than
101          * compressed data (due to block header),
102          * buffers should be big enough for both cases.
103          */
104         max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
105         ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
106                         NUM_LARGE_MBUFS,
107                         CACHE_SIZE, 0,
108                         max_buf_size + RTE_PKTMBUF_HEADROOM,
109                         rte_socket_id());
110         if (ts_params->large_mbuf_pool == NULL) {
111                 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
112                 return TEST_FAILED;
113         }
114
115         /* Create mempool with smaller buffers for SGL testing */
116         ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
117                         NUM_LARGE_MBUFS * MAX_SEGS,
118                         CACHE_SIZE, 0,
119                         SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
120                         rte_socket_id());
121         if (ts_params->small_mbuf_pool == NULL) {
122                 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
123                 goto exit;
124         }
125
126         ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
127                                 0, sizeof(struct priv_op_data),
128                                 rte_socket_id());
129         if (ts_params->op_pool == NULL) {
130                 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
131                 goto exit;
132         }
133
134         ts_params->def_comp_xform =
135                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
136         if (ts_params->def_comp_xform == NULL) {
137                 RTE_LOG(ERR, USER1,
138                         "Default compress xform could not be created\n");
139                 goto exit;
140         }
141         ts_params->def_decomp_xform =
142                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
143         if (ts_params->def_decomp_xform == NULL) {
144                 RTE_LOG(ERR, USER1,
145                         "Default decompress xform could not be created\n");
146                 goto exit;
147         }
148
149         /* Initializes default values for compress/decompress xforms */
150         ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
151         ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
152         ts_params->def_comp_xform->compress.deflate.huffman =
153                                                 RTE_COMP_HUFFMAN_DEFAULT;
154         ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
155         ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
156         ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
157
158         ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
159         ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
160         ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
161         ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
162
163         return TEST_SUCCESS;
164
165 exit:
166         testsuite_teardown();
167
168         return TEST_FAILED;
169 }
170
171 static int
172 generic_ut_setup(void)
173 {
174         /* Configure compressdev (one device, one queue pair) */
175         struct rte_compressdev_config config = {
176                 .socket_id = rte_socket_id(),
177                 .nb_queue_pairs = 1,
178                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
179                 .max_nb_streams = 0
180         };
181
182         if (rte_compressdev_configure(0, &config) < 0) {
183                 RTE_LOG(ERR, USER1, "Device configuration failed\n");
184                 return -1;
185         }
186
187         if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
188                         rte_socket_id()) < 0) {
189                 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
190                 return -1;
191         }
192
193         if (rte_compressdev_start(0) < 0) {
194                 RTE_LOG(ERR, USER1, "Device could not be started\n");
195                 return -1;
196         }
197
198         return 0;
199 }
200
201 static void
202 generic_ut_teardown(void)
203 {
204         rte_compressdev_stop(0);
205         if (rte_compressdev_close(0) < 0)
206                 RTE_LOG(ERR, USER1, "Device could not be closed\n");
207 }
208
209 static int
210 test_compressdev_invalid_configuration(void)
211 {
212         struct rte_compressdev_config invalid_config;
213         struct rte_compressdev_config valid_config = {
214                 .socket_id = rte_socket_id(),
215                 .nb_queue_pairs = 1,
216                 .max_nb_priv_xforms = NUM_MAX_XFORMS,
217                 .max_nb_streams = 0
218         };
219         struct rte_compressdev_info dev_info;
220
221         /* Invalid configuration with 0 queue pairs */
222         memcpy(&invalid_config, &valid_config,
223                         sizeof(struct rte_compressdev_config));
224         invalid_config.nb_queue_pairs = 0;
225
226         TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
227                         "Device configuration was successful "
228                         "with no queue pairs (invalid)\n");
229
230         /*
231          * Invalid configuration with too many queue pairs
232          * (if there is an actual maximum number of queue pairs)
233          */
234         rte_compressdev_info_get(0, &dev_info);
235         if (dev_info.max_nb_queue_pairs != 0) {
236                 memcpy(&invalid_config, &valid_config,
237                         sizeof(struct rte_compressdev_config));
238                 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
239
240                 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
241                                 "Device configuration was successful "
242                                 "with too many queue pairs (invalid)\n");
243         }
244
245         /* Invalid queue pair setup, with no number of queue pairs set */
246         TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
247                                 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
248                         "Queue pair setup was successful "
249                         "with no queue pairs set (invalid)\n");
250
251         return TEST_SUCCESS;
252 }
253
254 static int
255 compare_buffers(const char *buffer1, uint32_t buffer1_len,
256                 const char *buffer2, uint32_t buffer2_len)
257 {
258         if (buffer1_len != buffer2_len) {
259                 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
260                 return -1;
261         }
262
263         if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
264                 RTE_LOG(ERR, USER1, "Buffers are different\n");
265                 return -1;
266         }
267
268         return 0;
269 }
270
271 /*
272  * Maps compressdev and Zlib flush flags
273  */
274 static int
275 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
276 {
277         switch (flag) {
278         case RTE_COMP_FLUSH_NONE:
279                 return Z_NO_FLUSH;
280         case RTE_COMP_FLUSH_SYNC:
281                 return Z_SYNC_FLUSH;
282         case RTE_COMP_FLUSH_FULL:
283                 return Z_FULL_FLUSH;
284         case RTE_COMP_FLUSH_FINAL:
285                 return Z_FINISH;
286         /*
287          * There should be only the values above,
288          * so this should never happen
289          */
290         default:
291                 return -1;
292         }
293 }
294
295 static int
296 compress_zlib(struct rte_comp_op *op,
297                 const struct rte_comp_xform *xform, int mem_level)
298 {
299         z_stream stream;
300         int zlib_flush;
301         int strategy, window_bits, comp_level;
302         int ret = TEST_FAILED;
303         uint8_t *single_src_buf = NULL;
304         uint8_t *single_dst_buf = NULL;
305
306         /* initialize zlib stream */
307         stream.zalloc = Z_NULL;
308         stream.zfree = Z_NULL;
309         stream.opaque = Z_NULL;
310
311         if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
312                 strategy = Z_FIXED;
313         else
314                 strategy = Z_DEFAULT_STRATEGY;
315
316         /*
317          * Window bits is the base two logarithm of the window size (in bytes).
318          * When doing raw DEFLATE, this number will be negative.
319          */
320         window_bits = -(xform->compress.window_size);
321
322         comp_level = xform->compress.level;
323
324         if (comp_level != RTE_COMP_LEVEL_NONE)
325                 ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
326                         window_bits, mem_level, strategy);
327         else
328                 ret = deflateInit(&stream, Z_NO_COMPRESSION);
329
330         if (ret != Z_OK) {
331                 printf("Zlib deflate could not be initialized\n");
332                 goto exit;
333         }
334
335         /* Assuming stateless operation */
336         /* SGL */
337         if (op->m_src->nb_segs > 1) {
338                 single_src_buf = rte_malloc(NULL,
339                                 rte_pktmbuf_pkt_len(op->m_src), 0);
340                 if (single_src_buf == NULL) {
341                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
342                         goto exit;
343                 }
344                 single_dst_buf = rte_malloc(NULL,
345                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
346                 if (single_dst_buf == NULL) {
347                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
348                         goto exit;
349                 }
350                 if (rte_pktmbuf_read(op->m_src, 0,
351                                         rte_pktmbuf_pkt_len(op->m_src),
352                                         single_src_buf) == NULL) {
353                         RTE_LOG(ERR, USER1,
354                                 "Buffer could not be read entirely\n");
355                         goto exit;
356                 }
357
358                 stream.avail_in = op->src.length;
359                 stream.next_in = single_src_buf;
360                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
361                 stream.next_out = single_dst_buf;
362
363         } else {
364                 stream.avail_in = op->src.length;
365                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
366                 stream.avail_out = op->m_dst->data_len;
367                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
368         }
369         /* Stateless operation, all buffer will be compressed in one go */
370         zlib_flush = map_zlib_flush_flag(op->flush_flag);
371         ret = deflate(&stream, zlib_flush);
372
373         if (stream.avail_in != 0) {
374                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
375                 goto exit;
376         }
377
378         if (ret != Z_STREAM_END)
379                 goto exit;
380
381         /* Copy data to destination SGL */
382         if (op->m_src->nb_segs > 1) {
383                 uint32_t remaining_data = stream.total_out;
384                 uint8_t *src_data = single_dst_buf;
385                 struct rte_mbuf *dst_buf = op->m_dst;
386
387                 while (remaining_data > 0) {
388                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
389                                         uint8_t *);
390                         /* Last segment */
391                         if (remaining_data < dst_buf->data_len) {
392                                 memcpy(dst_data, src_data, remaining_data);
393                                 remaining_data = 0;
394                         } else {
395                                 memcpy(dst_data, src_data, dst_buf->data_len);
396                                 remaining_data -= dst_buf->data_len;
397                                 src_data += dst_buf->data_len;
398                                 dst_buf = dst_buf->next;
399                         }
400                 }
401         }
402
403         op->consumed = stream.total_in;
404         op->produced = stream.total_out;
405         op->status = RTE_COMP_OP_STATUS_SUCCESS;
406
407         deflateReset(&stream);
408
409         ret = 0;
410 exit:
411         deflateEnd(&stream);
412         rte_free(single_src_buf);
413         rte_free(single_dst_buf);
414
415         return ret;
416 }
417
418 static int
419 decompress_zlib(struct rte_comp_op *op,
420                 const struct rte_comp_xform *xform)
421 {
422         z_stream stream;
423         int window_bits;
424         int zlib_flush;
425         int ret = TEST_FAILED;
426         uint8_t *single_src_buf = NULL;
427         uint8_t *single_dst_buf = NULL;
428
429         /* initialize zlib stream */
430         stream.zalloc = Z_NULL;
431         stream.zfree = Z_NULL;
432         stream.opaque = Z_NULL;
433
434         /*
435          * Window bits is the base two logarithm of the window size (in bytes).
436          * When doing raw DEFLATE, this number will be negative.
437          */
438         window_bits = -(xform->decompress.window_size);
439
440         ret = inflateInit2(&stream, window_bits);
441
442         if (ret != Z_OK) {
443                 printf("Zlib deflate could not be initialized\n");
444                 goto exit;
445         }
446
447         /* Assuming stateless operation */
448         /* SGL */
449         if (op->m_src->nb_segs > 1) {
450                 single_src_buf = rte_malloc(NULL,
451                                 rte_pktmbuf_pkt_len(op->m_src), 0);
452                 if (single_src_buf == NULL) {
453                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
454                         goto exit;
455                 }
456                 single_dst_buf = rte_malloc(NULL,
457                                 rte_pktmbuf_pkt_len(op->m_dst), 0);
458                 if (single_dst_buf == NULL) {
459                         RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
460                         goto exit;
461                 }
462                 if (rte_pktmbuf_read(op->m_src, 0,
463                                         rte_pktmbuf_pkt_len(op->m_src),
464                                         single_src_buf) == NULL) {
465                         RTE_LOG(ERR, USER1,
466                                 "Buffer could not be read entirely\n");
467                         goto exit;
468                 }
469
470                 stream.avail_in = op->src.length;
471                 stream.next_in = single_src_buf;
472                 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
473                 stream.next_out = single_dst_buf;
474
475         } else {
476                 stream.avail_in = op->src.length;
477                 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
478                 stream.avail_out = op->m_dst->data_len;
479                 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
480         }
481
482         /* Stateless operation, all buffer will be compressed in one go */
483         zlib_flush = map_zlib_flush_flag(op->flush_flag);
484         ret = inflate(&stream, zlib_flush);
485
486         if (stream.avail_in != 0) {
487                 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
488                 goto exit;
489         }
490
491         if (ret != Z_STREAM_END)
492                 goto exit;
493
494         if (op->m_src->nb_segs > 1) {
495                 uint32_t remaining_data = stream.total_out;
496                 uint8_t *src_data = single_dst_buf;
497                 struct rte_mbuf *dst_buf = op->m_dst;
498
499                 while (remaining_data > 0) {
500                         uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
501                                         uint8_t *);
502                         /* Last segment */
503                         if (remaining_data < dst_buf->data_len) {
504                                 memcpy(dst_data, src_data, remaining_data);
505                                 remaining_data = 0;
506                         } else {
507                                 memcpy(dst_data, src_data, dst_buf->data_len);
508                                 remaining_data -= dst_buf->data_len;
509                                 src_data += dst_buf->data_len;
510                                 dst_buf = dst_buf->next;
511                         }
512                 }
513         }
514
515         op->consumed = stream.total_in;
516         op->produced = stream.total_out;
517         op->status = RTE_COMP_OP_STATUS_SUCCESS;
518
519         inflateReset(&stream);
520
521         ret = 0;
522 exit:
523         inflateEnd(&stream);
524
525         return ret;
526 }
527
528 static int
529 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
530                 uint32_t total_data_size,
531                 struct rte_mempool *small_mbuf_pool,
532                 struct rte_mempool *large_mbuf_pool,
533                 uint8_t limit_segs_in_sgl)
534 {
535         uint32_t remaining_data = total_data_size;
536         uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
537         struct rte_mempool *pool;
538         struct rte_mbuf *next_seg;
539         uint32_t data_size;
540         char *buf_ptr;
541         const char *data_ptr = test_buf;
542         uint16_t i;
543         int ret;
544
545         if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
546                 num_remaining_segs = limit_segs_in_sgl - 1;
547
548         /*
549          * Allocate data in the first segment (header) and
550          * copy data if test buffer is provided
551          */
552         if (remaining_data < SMALL_SEG_SIZE)
553                 data_size = remaining_data;
554         else
555                 data_size = SMALL_SEG_SIZE;
556         buf_ptr = rte_pktmbuf_append(head_buf, data_size);
557         if (buf_ptr == NULL) {
558                 RTE_LOG(ERR, USER1,
559                         "Not enough space in the 1st buffer\n");
560                 return -1;
561         }
562
563         if (data_ptr != NULL) {
564                 /* Copy characters without NULL terminator */
565                 strncpy(buf_ptr, data_ptr, data_size);
566                 data_ptr += data_size;
567         }
568         remaining_data -= data_size;
569         num_remaining_segs--;
570
571         /*
572          * Allocate the rest of the segments,
573          * copy the rest of the data and chain the segments.
574          */
575         for (i = 0; i < num_remaining_segs; i++) {
576
577                 if (i == (num_remaining_segs - 1)) {
578                         /* last segment */
579                         if (remaining_data > SMALL_SEG_SIZE)
580                                 pool = large_mbuf_pool;
581                         else
582                                 pool = small_mbuf_pool;
583                         data_size = remaining_data;
584                 } else {
585                         data_size = SMALL_SEG_SIZE;
586                         pool = small_mbuf_pool;
587                 }
588
589                 next_seg = rte_pktmbuf_alloc(pool);
590                 if (next_seg == NULL) {
591                         RTE_LOG(ERR, USER1,
592                                 "New segment could not be allocated "
593                                 "from the mempool\n");
594                         return -1;
595                 }
596                 buf_ptr = rte_pktmbuf_append(next_seg, data_size);
597                 if (buf_ptr == NULL) {
598                         RTE_LOG(ERR, USER1,
599                                 "Not enough space in the buffer\n");
600                         rte_pktmbuf_free(next_seg);
601                         return -1;
602                 }
603                 if (data_ptr != NULL) {
604                         /* Copy characters without NULL terminator */
605                         strncpy(buf_ptr, data_ptr, data_size);
606                         data_ptr += data_size;
607                 }
608                 remaining_data -= data_size;
609
610                 ret = rte_pktmbuf_chain(head_buf, next_seg);
611                 if (ret != 0) {
612                         rte_pktmbuf_free(next_seg);
613                         RTE_LOG(ERR, USER1,
614                                 "Segment could not chained\n");
615                         return -1;
616                 }
617         }
618
619         return 0;
620 }
621
622 /*
623  * Compresses and decompresses buffer with compressdev API and Zlib API
624  */
625 static int
626 test_deflate_comp_decomp(const char * const test_bufs[],
627                 unsigned int num_bufs,
628                 uint16_t buf_idx[],
629                 struct rte_comp_xform *compress_xforms[],
630                 struct rte_comp_xform *decompress_xforms[],
631                 unsigned int num_xforms,
632                 enum rte_comp_op_type state,
633                 unsigned int sgl,
634                 enum zlib_direction zlib_dir)
635 {
636         struct comp_testsuite_params *ts_params = &testsuite_params;
637         int ret_status = -1;
638         int ret;
639         struct rte_mbuf *uncomp_bufs[num_bufs];
640         struct rte_mbuf *comp_bufs[num_bufs];
641         struct rte_comp_op *ops[num_bufs];
642         struct rte_comp_op *ops_processed[num_bufs];
643         void *priv_xforms[num_bufs];
644         uint16_t num_enqd, num_deqd, num_total_deqd;
645         uint16_t num_priv_xforms = 0;
646         unsigned int deqd_retries = 0;
647         struct priv_op_data *priv_data;
648         char *buf_ptr;
649         unsigned int i;
650         struct rte_mempool *buf_pool;
651         uint32_t data_size;
652         const struct rte_compressdev_capabilities *capa =
653                 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
654         char *contig_buf = NULL;
655
656         /* Initialize all arrays to NULL */
657         memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
658         memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
659         memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
660         memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
661         memset(priv_xforms, 0, sizeof(void *) * num_bufs);
662
663         if (sgl)
664                 buf_pool = ts_params->small_mbuf_pool;
665         else
666                 buf_pool = ts_params->large_mbuf_pool;
667
668         /* Prepare the source mbufs with the data */
669         ret = rte_pktmbuf_alloc_bulk(buf_pool,
670                                 uncomp_bufs, num_bufs);
671         if (ret < 0) {
672                 RTE_LOG(ERR, USER1,
673                         "Source mbufs could not be allocated "
674                         "from the mempool\n");
675                 goto exit;
676         }
677
678         if (sgl) {
679                 for (i = 0; i < num_bufs; i++) {
680                         data_size = strlen(test_bufs[i]) + 1;
681                         if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
682                                         data_size,
683                                         ts_params->small_mbuf_pool,
684                                         ts_params->large_mbuf_pool,
685                                         MAX_SEGS) < 0)
686                                 goto exit;
687                 }
688         } else {
689                 for (i = 0; i < num_bufs; i++) {
690                         data_size = strlen(test_bufs[i]) + 1;
691                         buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
692                         snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
693                 }
694         }
695
696         /* Prepare the destination mbufs */
697         ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
698         if (ret < 0) {
699                 RTE_LOG(ERR, USER1,
700                         "Destination mbufs could not be allocated "
701                         "from the mempool\n");
702                 goto exit;
703         }
704
705         if (sgl) {
706                 for (i = 0; i < num_bufs; i++) {
707                         data_size = strlen(test_bufs[i]) *
708                                 COMPRESS_BUF_SIZE_RATIO;
709                         if (prepare_sgl_bufs(NULL, comp_bufs[i],
710                                         data_size,
711                                         ts_params->small_mbuf_pool,
712                                         ts_params->large_mbuf_pool,
713                                         MAX_SEGS) < 0)
714                                 goto exit;
715                 }
716
717         } else {
718                 for (i = 0; i < num_bufs; i++) {
719                         data_size = strlen(test_bufs[i]) *
720                                 COMPRESS_BUF_SIZE_RATIO;
721                         rte_pktmbuf_append(comp_bufs[i], data_size);
722                 }
723         }
724
725         /* Build the compression operations */
726         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
727         if (ret < 0) {
728                 RTE_LOG(ERR, USER1,
729                         "Compress operations could not be allocated "
730                         "from the mempool\n");
731                 goto exit;
732         }
733
734         for (i = 0; i < num_bufs; i++) {
735                 ops[i]->m_src = uncomp_bufs[i];
736                 ops[i]->m_dst = comp_bufs[i];
737                 ops[i]->src.offset = 0;
738                 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
739                 ops[i]->dst.offset = 0;
740                 if (state == RTE_COMP_OP_STATELESS) {
741                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
742                 } else {
743                         RTE_LOG(ERR, USER1,
744                                 "Stateful operations are not supported "
745                                 "in these tests yet\n");
746                         goto exit;
747                 }
748                 ops[i]->input_chksum = 0;
749                 /*
750                  * Store original operation index in private data,
751                  * since ordering does not have to be maintained,
752                  * when dequeueing from compressdev, so a comparison
753                  * at the end of the test can be done.
754                  */
755                 priv_data = (struct priv_op_data *) (ops[i] + 1);
756                 priv_data->orig_idx = i;
757         }
758
759         /* Compress data (either with Zlib API or compressdev API */
760         if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
761                 for (i = 0; i < num_bufs; i++) {
762                         const struct rte_comp_xform *compress_xform =
763                                 compress_xforms[i % num_xforms];
764                         ret = compress_zlib(ops[i], compress_xform,
765                                         DEFAULT_MEM_LEVEL);
766                         if (ret < 0)
767                                 goto exit;
768
769                         ops_processed[i] = ops[i];
770                 }
771         } else {
772                 /* Create compress private xform data */
773                 for (i = 0; i < num_xforms; i++) {
774                         ret = rte_compressdev_private_xform_create(0,
775                                 (const struct rte_comp_xform *)compress_xforms[i],
776                                 &priv_xforms[i]);
777                         if (ret < 0) {
778                                 RTE_LOG(ERR, USER1,
779                                         "Compression private xform "
780                                         "could not be created\n");
781                                 goto exit;
782                         }
783                         num_priv_xforms++;
784                 }
785
786                 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
787                         /* Attach shareable private xform data to ops */
788                         for (i = 0; i < num_bufs; i++)
789                                 ops[i]->private_xform = priv_xforms[i % num_xforms];
790                 } else {
791                         /* Create rest of the private xforms for the other ops */
792                         for (i = num_xforms; i < num_bufs; i++) {
793                                 ret = rte_compressdev_private_xform_create(0,
794                                         compress_xforms[i % num_xforms],
795                                         &priv_xforms[i]);
796                                 if (ret < 0) {
797                                         RTE_LOG(ERR, USER1,
798                                                 "Compression private xform "
799                                                 "could not be created\n");
800                                         goto exit;
801                                 }
802                                 num_priv_xforms++;
803                         }
804
805                         /* Attach non shareable private xform data to ops */
806                         for (i = 0; i < num_bufs; i++)
807                                 ops[i]->private_xform = priv_xforms[i];
808                 }
809
810                 /* Enqueue and dequeue all operations */
811                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
812                 if (num_enqd < num_bufs) {
813                         RTE_LOG(ERR, USER1,
814                                 "The operations could not be enqueued\n");
815                         goto exit;
816                 }
817
818                 num_total_deqd = 0;
819                 do {
820                         /*
821                          * If retrying a dequeue call, wait for 10 ms to allow
822                          * enough time to the driver to process the operations
823                          */
824                         if (deqd_retries != 0) {
825                                 /*
826                                  * Avoid infinite loop if not all the
827                                  * operations get out of the device
828                                  */
829                                 if (deqd_retries == MAX_DEQD_RETRIES) {
830                                         RTE_LOG(ERR, USER1,
831                                                 "Not all operations could be "
832                                                 "dequeued\n");
833                                         goto exit;
834                                 }
835                                 usleep(DEQUEUE_WAIT_TIME);
836                         }
837                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
838                                         &ops_processed[num_total_deqd], num_bufs);
839                         num_total_deqd += num_deqd;
840                         deqd_retries++;
841                 } while (num_total_deqd < num_enqd);
842
843                 deqd_retries = 0;
844
845                 /* Free compress private xforms */
846                 for (i = 0; i < num_priv_xforms; i++) {
847                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
848                         priv_xforms[i] = NULL;
849                 }
850                 num_priv_xforms = 0;
851         }
852
853         for (i = 0; i < num_bufs; i++) {
854                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
855                 uint16_t xform_idx = priv_data->orig_idx % num_xforms;
856                 const struct rte_comp_compress_xform *compress_xform =
857                                 &compress_xforms[xform_idx]->compress;
858                 enum rte_comp_huffman huffman_type =
859                         compress_xform->deflate.huffman;
860                 char engine[] = "zlib (directly, not PMD)";
861                 if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
862                         strlcpy(engine, "PMD", sizeof(engine));
863
864                 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
865                         " %u bytes (level = %d, huffman = %s)\n",
866                         buf_idx[priv_data->orig_idx], engine,
867                         ops_processed[i]->consumed, ops_processed[i]->produced,
868                         compress_xform->level,
869                         huffman_type_strings[huffman_type]);
870                 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
871                         ops_processed[i]->consumed == 0 ? 0 :
872                         (float)ops_processed[i]->produced /
873                         ops_processed[i]->consumed * 100);
874                 ops[i] = NULL;
875         }
876
877         /*
878          * Check operation status and free source mbufs (destination mbuf and
879          * compress operation information is needed for the decompression stage)
880          */
881         for (i = 0; i < num_bufs; i++) {
882                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
883                         RTE_LOG(ERR, USER1,
884                                 "Some operations were not successful\n");
885                         goto exit;
886                 }
887                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
888                 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
889                 uncomp_bufs[priv_data->orig_idx] = NULL;
890         }
891
892         /* Allocate buffers for decompressed data */
893         ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
894         if (ret < 0) {
895                 RTE_LOG(ERR, USER1,
896                         "Destination mbufs could not be allocated "
897                         "from the mempool\n");
898                 goto exit;
899         }
900
901         if (sgl) {
902                 for (i = 0; i < num_bufs; i++) {
903                         priv_data = (struct priv_op_data *)
904                                         (ops_processed[i] + 1);
905                         data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
906                         if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
907                                         data_size,
908                                         ts_params->small_mbuf_pool,
909                                         ts_params->large_mbuf_pool,
910                                         MAX_SEGS) < 0)
911                                 goto exit;
912                 }
913
914         } else {
915                 for (i = 0; i < num_bufs; i++) {
916                         priv_data = (struct priv_op_data *)
917                                         (ops_processed[i] + 1);
918                         data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
919                         rte_pktmbuf_append(uncomp_bufs[i], data_size);
920                 }
921         }
922
923         /* Build the decompression operations */
924         ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
925         if (ret < 0) {
926                 RTE_LOG(ERR, USER1,
927                         "Decompress operations could not be allocated "
928                         "from the mempool\n");
929                 goto exit;
930         }
931
932         /* Source buffer is the compressed data from the previous operations */
933         for (i = 0; i < num_bufs; i++) {
934                 ops[i]->m_src = ops_processed[i]->m_dst;
935                 ops[i]->m_dst = uncomp_bufs[i];
936                 ops[i]->src.offset = 0;
937                 /*
938                  * Set the length of the compressed data to the
939                  * number of bytes that were produced in the previous stage
940                  */
941                 ops[i]->src.length = ops_processed[i]->produced;
942                 ops[i]->dst.offset = 0;
943                 if (state == RTE_COMP_OP_STATELESS) {
944                         ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
945                 } else {
946                         RTE_LOG(ERR, USER1,
947                                 "Stateful operations are not supported "
948                                 "in these tests yet\n");
949                         goto exit;
950                 }
951                 ops[i]->input_chksum = 0;
952                 /*
953                  * Copy private data from previous operations,
954                  * to keep the pointer to the original buffer
955                  */
956                 memcpy(ops[i] + 1, ops_processed[i] + 1,
957                                 sizeof(struct priv_op_data));
958         }
959
960         /*
961          * Free the previous compress operations,
962          * as it is not needed anymore
963          */
964         for (i = 0; i < num_bufs; i++) {
965                 rte_comp_op_free(ops_processed[i]);
966                 ops_processed[i] = NULL;
967         }
968
969         /* Decompress data (either with Zlib API or compressdev API */
970         if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
971                 for (i = 0; i < num_bufs; i++) {
972                         priv_data = (struct priv_op_data *)(ops[i] + 1);
973                         uint16_t xform_idx = priv_data->orig_idx % num_xforms;
974                         const struct rte_comp_xform *decompress_xform =
975                                 decompress_xforms[xform_idx];
976
977                         ret = decompress_zlib(ops[i], decompress_xform);
978                         if (ret < 0)
979                                 goto exit;
980
981                         ops_processed[i] = ops[i];
982                 }
983         } else {
984                 /* Create decompress private xform data */
985                 for (i = 0; i < num_xforms; i++) {
986                         ret = rte_compressdev_private_xform_create(0,
987                                 (const struct rte_comp_xform *)decompress_xforms[i],
988                                 &priv_xforms[i]);
989                         if (ret < 0) {
990                                 RTE_LOG(ERR, USER1,
991                                         "Decompression private xform "
992                                         "could not be created\n");
993                                 goto exit;
994                         }
995                         num_priv_xforms++;
996                 }
997
998                 if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
999                         /* Attach shareable private xform data to ops */
1000                         for (i = 0; i < num_bufs; i++) {
1001                                 priv_data = (struct priv_op_data *)(ops[i] + 1);
1002                                 uint16_t xform_idx = priv_data->orig_idx %
1003                                                                 num_xforms;
1004                                 ops[i]->private_xform = priv_xforms[xform_idx];
1005                         }
1006                 } else {
1007                         /* Create rest of the private xforms for the other ops */
1008                         for (i = num_xforms; i < num_bufs; i++) {
1009                                 ret = rte_compressdev_private_xform_create(0,
1010                                         decompress_xforms[i % num_xforms],
1011                                         &priv_xforms[i]);
1012                                 if (ret < 0) {
1013                                         RTE_LOG(ERR, USER1,
1014                                                 "Decompression private xform "
1015                                                 "could not be created\n");
1016                                         goto exit;
1017                                 }
1018                                 num_priv_xforms++;
1019                         }
1020
1021                         /* Attach non shareable private xform data to ops */
1022                         for (i = 0; i < num_bufs; i++) {
1023                                 priv_data = (struct priv_op_data *)(ops[i] + 1);
1024                                 uint16_t xform_idx = priv_data->orig_idx;
1025                                 ops[i]->private_xform = priv_xforms[xform_idx];
1026                         }
1027                 }
1028
1029                 /* Enqueue and dequeue all operations */
1030                 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1031                 if (num_enqd < num_bufs) {
1032                         RTE_LOG(ERR, USER1,
1033                                 "The operations could not be enqueued\n");
1034                         goto exit;
1035                 }
1036
1037                 num_total_deqd = 0;
1038                 do {
1039                         /*
1040                          * If retrying a dequeue call, wait for 10 ms to allow
1041                          * enough time to the driver to process the operations
1042                          */
1043                         if (deqd_retries != 0) {
1044                                 /*
1045                                  * Avoid infinite loop if not all the
1046                                  * operations get out of the device
1047                                  */
1048                                 if (deqd_retries == MAX_DEQD_RETRIES) {
1049                                         RTE_LOG(ERR, USER1,
1050                                                 "Not all operations could be "
1051                                                 "dequeued\n");
1052                                         goto exit;
1053                                 }
1054                                 usleep(DEQUEUE_WAIT_TIME);
1055                         }
1056                         num_deqd = rte_compressdev_dequeue_burst(0, 0,
1057                                         &ops_processed[num_total_deqd], num_bufs);
1058                         num_total_deqd += num_deqd;
1059                         deqd_retries++;
1060                 } while (num_total_deqd < num_enqd);
1061
1062                 deqd_retries = 0;
1063         }
1064
1065         for (i = 0; i < num_bufs; i++) {
1066                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1067                 char engine[] = "zlib, (directly, no PMD)";
1068                 if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1069                         strlcpy(engine, "pmd", sizeof(engine));
1070                 RTE_LOG(DEBUG, USER1,
1071                         "Buffer %u decompressed by %s from %u to %u bytes\n",
1072                         buf_idx[priv_data->orig_idx], engine,
1073                         ops_processed[i]->consumed, ops_processed[i]->produced);
1074                 ops[i] = NULL;
1075         }
1076
1077         /*
1078          * Check operation status and free source mbuf (destination mbuf and
1079          * compress operation information is still needed)
1080          */
1081         for (i = 0; i < num_bufs; i++) {
1082                 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1083                         RTE_LOG(ERR, USER1,
1084                                 "Some operations were not successful\n");
1085                         goto exit;
1086                 }
1087                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1088                 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1089                 comp_bufs[priv_data->orig_idx] = NULL;
1090         }
1091
1092         /*
1093          * Compare the original stream with the decompressed stream
1094          * (in size and the data)
1095          */
1096         for (i = 0; i < num_bufs; i++) {
1097                 priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1098                 const char *buf1 = test_bufs[priv_data->orig_idx];
1099                 const char *buf2;
1100                 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1101                 if (contig_buf == NULL) {
1102                         RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1103                                         "be allocated\n");
1104                         goto exit;
1105                 }
1106
1107                 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1108                                 ops_processed[i]->produced, contig_buf);
1109
1110                 if (compare_buffers(buf1, strlen(buf1) + 1,
1111                                 buf2, ops_processed[i]->produced) < 0)
1112                         goto exit;
1113
1114                 rte_free(contig_buf);
1115                 contig_buf = NULL;
1116         }
1117
1118         ret_status = 0;
1119
1120 exit:
1121         /* Free resources */
1122         for (i = 0; i < num_bufs; i++) {
1123                 rte_pktmbuf_free(uncomp_bufs[i]);
1124                 rte_pktmbuf_free(comp_bufs[i]);
1125                 rte_comp_op_free(ops[i]);
1126                 rte_comp_op_free(ops_processed[i]);
1127         }
1128         for (i = 0; i < num_priv_xforms; i++) {
1129                 if (priv_xforms[i] != NULL)
1130                         rte_compressdev_private_xform_free(0, priv_xforms[i]);
1131         }
1132         rte_free(contig_buf);
1133
1134         return ret_status;
1135 }
1136
1137 static int
1138 test_compressdev_deflate_stateless_fixed(void)
1139 {
1140         struct comp_testsuite_params *ts_params = &testsuite_params;
1141         const char *test_buffer;
1142         uint16_t i;
1143         int ret;
1144         const struct rte_compressdev_capabilities *capab;
1145
1146         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1147         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1148
1149         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1150                 return -ENOTSUP;
1151
1152         struct rte_comp_xform *compress_xform =
1153                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1154
1155         if (compress_xform == NULL) {
1156                 RTE_LOG(ERR, USER1,
1157                         "Compress xform could not be created\n");
1158                 ret = TEST_FAILED;
1159                 goto exit;
1160         }
1161
1162         memcpy(compress_xform, ts_params->def_comp_xform,
1163                         sizeof(struct rte_comp_xform));
1164         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1165
1166         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1167                 test_buffer = compress_test_bufs[i];
1168
1169                 /* Compress with compressdev, decompress with Zlib */
1170                 if (test_deflate_comp_decomp(&test_buffer, 1,
1171                                 &i,
1172                                 &compress_xform,
1173                                 &ts_params->def_decomp_xform,
1174                                 1,
1175                                 RTE_COMP_OP_STATELESS,
1176                                 0,
1177                                 ZLIB_DECOMPRESS) < 0) {
1178                         ret = TEST_FAILED;
1179                         goto exit;
1180                 }
1181
1182                 /* Compress with Zlib, decompress with compressdev */
1183                 if (test_deflate_comp_decomp(&test_buffer, 1,
1184                                 &i,
1185                                 &compress_xform,
1186                                 &ts_params->def_decomp_xform,
1187                                 1,
1188                                 RTE_COMP_OP_STATELESS,
1189                                 0,
1190                                 ZLIB_COMPRESS) < 0) {
1191                         ret = TEST_FAILED;
1192                         goto exit;
1193                 }
1194         }
1195
1196         ret = TEST_SUCCESS;
1197
1198 exit:
1199         rte_free(compress_xform);
1200         return ret;
1201 }
1202
1203 static int
1204 test_compressdev_deflate_stateless_dynamic(void)
1205 {
1206         struct comp_testsuite_params *ts_params = &testsuite_params;
1207         const char *test_buffer;
1208         uint16_t i;
1209         int ret;
1210         struct rte_comp_xform *compress_xform =
1211                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1212
1213         const struct rte_compressdev_capabilities *capab;
1214
1215         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1216         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1217
1218         if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1219                 return -ENOTSUP;
1220
1221         if (compress_xform == NULL) {
1222                 RTE_LOG(ERR, USER1,
1223                         "Compress xform could not be created\n");
1224                 ret = TEST_FAILED;
1225                 goto exit;
1226         }
1227
1228         memcpy(compress_xform, ts_params->def_comp_xform,
1229                         sizeof(struct rte_comp_xform));
1230         compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1231
1232         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1233                 test_buffer = compress_test_bufs[i];
1234
1235                 /* Compress with compressdev, decompress with Zlib */
1236                 if (test_deflate_comp_decomp(&test_buffer, 1,
1237                                 &i,
1238                                 &compress_xform,
1239                                 &ts_params->def_decomp_xform,
1240                                 1,
1241                                 RTE_COMP_OP_STATELESS,
1242                                 0,
1243                                 ZLIB_DECOMPRESS) < 0) {
1244                         ret = TEST_FAILED;
1245                         goto exit;
1246                 }
1247
1248                 /* Compress with Zlib, decompress with compressdev */
1249                 if (test_deflate_comp_decomp(&test_buffer, 1,
1250                                 &i,
1251                                 &compress_xform,
1252                                 &ts_params->def_decomp_xform,
1253                                 1,
1254                                 RTE_COMP_OP_STATELESS,
1255                                 0,
1256                                 ZLIB_COMPRESS) < 0) {
1257                         ret = TEST_FAILED;
1258                         goto exit;
1259                 }
1260         }
1261
1262         ret = TEST_SUCCESS;
1263
1264 exit:
1265         rte_free(compress_xform);
1266         return ret;
1267 }
1268
1269 static int
1270 test_compressdev_deflate_stateless_multi_op(void)
1271 {
1272         struct comp_testsuite_params *ts_params = &testsuite_params;
1273         uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1274         uint16_t buf_idx[num_bufs];
1275         uint16_t i;
1276
1277         for (i = 0; i < num_bufs; i++)
1278                 buf_idx[i] = i;
1279
1280         /* Compress with compressdev, decompress with Zlib */
1281         if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1282                         buf_idx,
1283                         &ts_params->def_comp_xform,
1284                         &ts_params->def_decomp_xform,
1285                         1,
1286                         RTE_COMP_OP_STATELESS,
1287                         0,
1288                         ZLIB_DECOMPRESS) < 0)
1289                 return TEST_FAILED;
1290
1291         /* Compress with Zlib, decompress with compressdev */
1292         if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1293                         buf_idx,
1294                         &ts_params->def_comp_xform,
1295                         &ts_params->def_decomp_xform,
1296                         1,
1297                         RTE_COMP_OP_STATELESS,
1298                         0,
1299                         ZLIB_COMPRESS) < 0)
1300                 return TEST_FAILED;
1301
1302         return TEST_SUCCESS;
1303 }
1304
1305 static int
1306 test_compressdev_deflate_stateless_multi_level(void)
1307 {
1308         struct comp_testsuite_params *ts_params = &testsuite_params;
1309         const char *test_buffer;
1310         unsigned int level;
1311         uint16_t i;
1312         int ret;
1313         struct rte_comp_xform *compress_xform =
1314                         rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1315
1316         if (compress_xform == NULL) {
1317                 RTE_LOG(ERR, USER1,
1318                         "Compress xform could not be created\n");
1319                 ret = TEST_FAILED;
1320                 goto exit;
1321         }
1322
1323         memcpy(compress_xform, ts_params->def_comp_xform,
1324                         sizeof(struct rte_comp_xform));
1325
1326         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1327                 test_buffer = compress_test_bufs[i];
1328                 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1329                                 level++) {
1330                         compress_xform->compress.level = level;
1331                         /* Compress with compressdev, decompress with Zlib */
1332                         if (test_deflate_comp_decomp(&test_buffer, 1,
1333                                         &i,
1334                                         &compress_xform,
1335                                         &ts_params->def_decomp_xform,
1336                                         1,
1337                                         RTE_COMP_OP_STATELESS,
1338                                         0,
1339                                         ZLIB_DECOMPRESS) < 0) {
1340                                 ret = TEST_FAILED;
1341                                 goto exit;
1342                         }
1343                 }
1344         }
1345
1346         ret = TEST_SUCCESS;
1347
1348 exit:
1349         rte_free(compress_xform);
1350         return ret;
1351 }
1352
1353 #define NUM_XFORMS 3
1354 static int
1355 test_compressdev_deflate_stateless_multi_xform(void)
1356 {
1357         struct comp_testsuite_params *ts_params = &testsuite_params;
1358         uint16_t num_bufs = NUM_XFORMS;
1359         struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1360         struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1361         const char *test_buffers[NUM_XFORMS];
1362         uint16_t i;
1363         unsigned int level = RTE_COMP_LEVEL_MIN;
1364         uint16_t buf_idx[num_bufs];
1365
1366         int ret;
1367
1368         /* Create multiple xforms with various levels */
1369         for (i = 0; i < NUM_XFORMS; i++) {
1370                 compress_xforms[i] = rte_malloc(NULL,
1371                                 sizeof(struct rte_comp_xform), 0);
1372                 if (compress_xforms[i] == NULL) {
1373                         RTE_LOG(ERR, USER1,
1374                                 "Compress xform could not be created\n");
1375                         ret = TEST_FAILED;
1376                         goto exit;
1377                 }
1378
1379                 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1380                                 sizeof(struct rte_comp_xform));
1381                 compress_xforms[i]->compress.level = level;
1382                 level++;
1383
1384                 decompress_xforms[i] = rte_malloc(NULL,
1385                                 sizeof(struct rte_comp_xform), 0);
1386                 if (decompress_xforms[i] == NULL) {
1387                         RTE_LOG(ERR, USER1,
1388                                 "Decompress xform could not be created\n");
1389                         ret = TEST_FAILED;
1390                         goto exit;
1391                 }
1392
1393                 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1394                                 sizeof(struct rte_comp_xform));
1395         }
1396
1397         for (i = 0; i < NUM_XFORMS; i++) {
1398                 buf_idx[i] = 0;
1399                 /* Use the same buffer in all sessions */
1400                 test_buffers[i] = compress_test_bufs[0];
1401         }
1402         /* Compress with compressdev, decompress with Zlib */
1403         if (test_deflate_comp_decomp(test_buffers, num_bufs,
1404                         buf_idx,
1405                         compress_xforms,
1406                         decompress_xforms,
1407                         NUM_XFORMS,
1408                         RTE_COMP_OP_STATELESS,
1409                         0,
1410                         ZLIB_DECOMPRESS) < 0) {
1411                 ret = TEST_FAILED;
1412                 goto exit;
1413         }
1414
1415         ret = TEST_SUCCESS;
1416 exit:
1417         for (i = 0; i < NUM_XFORMS; i++) {
1418                 rte_free(compress_xforms[i]);
1419                 rte_free(decompress_xforms[i]);
1420         }
1421
1422         return ret;
1423 }
1424
1425 static int
1426 test_compressdev_deflate_stateless_sgl(void)
1427 {
1428         struct comp_testsuite_params *ts_params = &testsuite_params;
1429         uint16_t i;
1430         const char *test_buffer;
1431         const struct rte_compressdev_capabilities *capab;
1432
1433         capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1434         TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1435
1436         if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1437                 return -ENOTSUP;
1438
1439         for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1440                 test_buffer = compress_test_bufs[i];
1441                 /* Compress with compressdev, decompress with Zlib */
1442                 if (test_deflate_comp_decomp(&test_buffer, 1,
1443                                 &i,
1444                                 &ts_params->def_comp_xform,
1445                                 &ts_params->def_decomp_xform,
1446                                 1,
1447                                 RTE_COMP_OP_STATELESS,
1448                                 1,
1449                                 ZLIB_DECOMPRESS) < 0)
1450                         return TEST_FAILED;
1451
1452                 /* Compress with Zlib, decompress with compressdev */
1453                 if (test_deflate_comp_decomp(&test_buffer, 1,
1454                                 &i,
1455                                 &ts_params->def_comp_xform,
1456                                 &ts_params->def_decomp_xform,
1457                                 1,
1458                                 RTE_COMP_OP_STATELESS,
1459                                 1,
1460                                 ZLIB_COMPRESS) < 0)
1461                         return TEST_FAILED;
1462         }
1463
1464         return TEST_SUCCESS;
1465 }
1466
1467 static struct unit_test_suite compressdev_testsuite  = {
1468         .suite_name = "compressdev unit test suite",
1469         .setup = testsuite_setup,
1470         .teardown = testsuite_teardown,
1471         .unit_test_cases = {
1472                 TEST_CASE_ST(NULL, NULL,
1473                         test_compressdev_invalid_configuration),
1474                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1475                         test_compressdev_deflate_stateless_fixed),
1476                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1477                         test_compressdev_deflate_stateless_dynamic),
1478                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1479                         test_compressdev_deflate_stateless_multi_op),
1480                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1481                         test_compressdev_deflate_stateless_multi_level),
1482                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1483                         test_compressdev_deflate_stateless_multi_xform),
1484                 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
1485                         test_compressdev_deflate_stateless_sgl),
1486                 TEST_CASES_END() /**< NULL terminate unit test array */
1487         }
1488 };
1489
1490 static int
1491 test_compressdev(void)
1492 {
1493         return unit_test_suite_runner(&compressdev_testsuite);
1494 }
1495
1496 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);