app/compress-perf: add verification test case
[dpdk.git] / app / test-compress-perf / main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6 #include <rte_eal.h>
7 #include <rte_log.h>
8 #include <rte_compressdev.h>
9
10 #include "comp_perf_options.h"
11 #include "comp_perf_test_verify.h"
12 #include "comp_perf.h"
13 #include "comp_perf_test_common.h"
14
15 #define NUM_MAX_XFORMS 16
16 #define NUM_MAX_INFLIGHT_OPS 512
17
/* Human-readable names for the test types, indexed by enum
 * cperf_test_type; used for parsing and printing the --ptest option.
 */
__extension__
const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_BENCHMARK] = "benchmark",
	[CPERF_TEST_TYPE_VERIFY] = "verify"
};
23
/* Dispatch table mapping each test type to its constructor, runner and
 * destructor; indexed by enum cperf_test_type (see main()).
 */
__extension__
static const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_BENCHMARK] = {
			cperf_benchmark_test_constructor,
			cperf_benchmark_test_runner,
			cperf_benchmark_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
			cperf_verify_test_constructor,
			cperf_verify_test_runner,
			cperf_verify_test_destructor
	}
};
37
38 static int
39 comp_perf_check_capabilities(struct comp_test_data *test_data, uint8_t cdev_id)
40 {
41         const struct rte_compressdev_capabilities *cap;
42
43         cap = rte_compressdev_capability_get(cdev_id,
44                                              RTE_COMP_ALGO_DEFLATE);
45
46         if (cap == NULL) {
47                 RTE_LOG(ERR, USER1,
48                         "Compress device does not support DEFLATE\n");
49                 return -1;
50         }
51
52         uint64_t comp_flags = cap->comp_feature_flags;
53
54         /* Huffman enconding */
55         if (test_data->huffman_enc == RTE_COMP_HUFFMAN_FIXED &&
56                         (comp_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) {
57                 RTE_LOG(ERR, USER1,
58                         "Compress device does not supported Fixed Huffman\n");
59                 return -1;
60         }
61
62         if (test_data->huffman_enc == RTE_COMP_HUFFMAN_DYNAMIC &&
63                         (comp_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) {
64                 RTE_LOG(ERR, USER1,
65                         "Compress device does not supported Dynamic Huffman\n");
66                 return -1;
67         }
68
69         /* Window size */
70         if (test_data->window_sz != -1) {
71                 if (param_range_check(test_data->window_sz, &cap->window_size)
72                                 < 0) {
73                         RTE_LOG(ERR, USER1,
74                                 "Compress device does not support "
75                                 "this window size\n");
76                         return -1;
77                 }
78         } else
79                 /* Set window size to PMD maximum if none was specified */
80                 test_data->window_sz = cap->window_size.max;
81
82         /* Check if chained mbufs is supported */
83         if (test_data->max_sgl_segs > 1  &&
84                         (comp_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) {
85                 RTE_LOG(INFO, USER1, "Compress device does not support "
86                                 "chained mbufs. Max SGL segments set to 1\n");
87                 test_data->max_sgl_segs = 1;
88         }
89
90         /* Level 0 support */
91         if (test_data->level_lst.min == 0 &&
92                         (comp_flags & RTE_COMP_FF_NONCOMPRESSED_BLOCKS) == 0) {
93                 RTE_LOG(ERR, USER1, "Compress device does not support "
94                                 "level 0 (no compression)\n");
95                 return -1;
96         }
97
98         return 0;
99 }
100
101 static int
102 comp_perf_initialize_compressdev(struct comp_test_data *test_data,
103                                  uint8_t *enabled_cdevs)
104 {
105         uint8_t enabled_cdev_count, nb_lcores, cdev_id;
106         unsigned int i, j;
107         int ret;
108
109         enabled_cdev_count = rte_compressdev_devices_get(test_data->driver_name,
110                         enabled_cdevs, RTE_COMPRESS_MAX_DEVS);
111         if (enabled_cdev_count == 0) {
112                 RTE_LOG(ERR, USER1, "No compress devices type %s available\n",
113                                 test_data->driver_name);
114                 return -EINVAL;
115         }
116
117         nb_lcores = rte_lcore_count() - 1;
118         /*
119          * Use fewer devices,
120          * if there are more available than cores.
121          */
122         if (enabled_cdev_count > nb_lcores) {
123                 enabled_cdev_count = nb_lcores;
124                 RTE_LOG(INFO, USER1,
125                         " There's more available devices than cores!"
126                         " The number of devices has been aligned to %d cores\n",
127                         nb_lcores);
128         }
129
130         /*
131          * Calculate number of needed queue pairs, based on the amount
132          * of available number of logical cores and compression devices.
133          * For instance, if there are 4 cores and 2 compression devices,
134          * 2 queue pairs will be set up per device.
135          * One queue pair per one core.
136          * if e.g.: there're 3 cores and 2 compression devices,
137          * 2 queue pairs will be set up per device but one queue pair
138          * will left unused in the last one device
139          */
140         test_data->nb_qps = (nb_lcores % enabled_cdev_count) ?
141                                 (nb_lcores / enabled_cdev_count) + 1 :
142                                 nb_lcores / enabled_cdev_count;
143
144         for (i = 0; i < enabled_cdev_count &&
145                         i < RTE_COMPRESS_MAX_DEVS; i++,
146                                         nb_lcores -= test_data->nb_qps) {
147                 cdev_id = enabled_cdevs[i];
148
149                 struct rte_compressdev_info cdev_info;
150                 uint8_t socket_id = rte_compressdev_socket_id(cdev_id);
151
152                 rte_compressdev_info_get(cdev_id, &cdev_info);
153                 if (cdev_info.max_nb_queue_pairs &&
154                         test_data->nb_qps > cdev_info.max_nb_queue_pairs) {
155                         RTE_LOG(ERR, USER1,
156                                 "Number of needed queue pairs is higher "
157                                 "than the maximum number of queue pairs "
158                                 "per device.\n");
159                         RTE_LOG(ERR, USER1,
160                                 "Lower the number of cores or increase "
161                                 "the number of crypto devices\n");
162                         return -EINVAL;
163                 }
164
165                 if (comp_perf_check_capabilities(test_data, cdev_id) < 0)
166                         return -EINVAL;
167
168                 /* Configure compressdev */
169                 struct rte_compressdev_config config = {
170                         .socket_id = socket_id,
171                         .nb_queue_pairs = nb_lcores > test_data->nb_qps
172                                         ? test_data->nb_qps : nb_lcores,
173                         .max_nb_priv_xforms = NUM_MAX_XFORMS,
174                         .max_nb_streams = 0
175                 };
176
177                 if (rte_compressdev_configure(cdev_id, &config) < 0) {
178                         RTE_LOG(ERR, USER1, "Device configuration failed\n");
179                         return -EINVAL;
180                 }
181
182                 for (j = 0; j < test_data->nb_qps; j++) {
183                         ret = rte_compressdev_queue_pair_setup(cdev_id, j,
184                                         NUM_MAX_INFLIGHT_OPS, socket_id);
185                         if (ret < 0) {
186                                 RTE_LOG(ERR, USER1,
187                               "Failed to setup queue pair %u on compressdev %u",
188                                         j, cdev_id);
189                                 return -EINVAL;
190                         }
191                 }
192
193                 ret = rte_compressdev_start(cdev_id);
194                 if (ret < 0) {
195                         RTE_LOG(ERR, USER1,
196                                 "Failed to start device %u: error %d\n",
197                                 cdev_id, ret);
198                         return -EPERM;
199                 }
200         }
201
202         return enabled_cdev_count;
203 }
204
205 static int
206 comp_perf_dump_input_data(struct comp_test_data *test_data)
207 {
208         FILE *f = fopen(test_data->input_file, "r");
209         int ret = -1;
210
211         if (f == NULL) {
212                 RTE_LOG(ERR, USER1, "Input file could not be opened\n");
213                 return -1;
214         }
215
216         if (fseek(f, 0, SEEK_END) != 0) {
217                 RTE_LOG(ERR, USER1, "Size of input could not be calculated\n");
218                 goto end;
219         }
220         size_t actual_file_sz = ftell(f);
221         /* If extended input data size has not been set,
222          * input data size = file size
223          */
224
225         if (test_data->input_data_sz == 0)
226                 test_data->input_data_sz = actual_file_sz;
227
228         if (test_data->input_data_sz <= 0 || actual_file_sz <= 0 ||
229                         fseek(f, 0, SEEK_SET) != 0) {
230                 RTE_LOG(ERR, USER1, "Size of input could not be calculated\n");
231                 goto end;
232         }
233
234         test_data->input_data = rte_zmalloc_socket(NULL,
235                                 test_data->input_data_sz, 0, rte_socket_id());
236
237         if (test_data->input_data == NULL) {
238                 RTE_LOG(ERR, USER1, "Memory to hold the data from the input "
239                                 "file could not be allocated\n");
240                 goto end;
241         }
242
243         size_t remaining_data = test_data->input_data_sz;
244         uint8_t *data = test_data->input_data;
245
246         while (remaining_data > 0) {
247                 size_t data_to_read = RTE_MIN(remaining_data, actual_file_sz);
248
249                 if (fread(data, data_to_read, 1, f) != 1) {
250                         RTE_LOG(ERR, USER1, "Input file could not be read\n");
251                         goto end;
252                 }
253                 if (fseek(f, 0, SEEK_SET) != 0) {
254                         RTE_LOG(ERR, USER1,
255                                 "Size of input could not be calculated\n");
256                         goto end;
257                 }
258                 remaining_data -= data_to_read;
259                 data += data_to_read;
260         }
261
262         if (test_data->input_data_sz > actual_file_sz)
263                 RTE_LOG(INFO, USER1,
264                   "%zu bytes read from file %s, extending the file %.2f times\n",
265                         test_data->input_data_sz, test_data->input_file,
266                         (double)test_data->input_data_sz/actual_file_sz);
267         else
268                 RTE_LOG(INFO, USER1,
269                         "%zu bytes read from file %s\n",
270                         test_data->input_data_sz, test_data->input_file);
271
272         ret = 0;
273
274 end:
275         fclose(f);
276         return ret;
277 }
278
279 int
280 main(int argc, char **argv)
281 {
282         uint8_t level_idx = 0;
283         int ret, i;
284         struct comp_test_data *test_data;
285         void *ctx[RTE_MAX_LCORE] = {};
286         uint8_t enabled_cdevs[RTE_COMPRESS_MAX_DEVS];
287         int nb_compressdevs = 0;
288         uint16_t total_nb_qps = 0;
289         uint8_t cdev_id;
290         uint32_t lcore_id;
291
292         /* Initialise DPDK EAL */
293         ret = rte_eal_init(argc, argv);
294         if (ret < 0)
295                 rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
296         argc -= ret;
297         argv += ret;
298
299         test_data = rte_zmalloc_socket(NULL, sizeof(struct comp_test_data),
300                                         0, rte_socket_id());
301
302         if (test_data == NULL)
303                 rte_exit(EXIT_FAILURE, "Cannot reserve memory in socket %d\n",
304                                 rte_socket_id());
305
306         ret = EXIT_SUCCESS;
307         test_data->cleanup = ST_TEST_DATA;
308         comp_perf_options_default(test_data);
309
310         if (comp_perf_options_parse(test_data, argc, argv) < 0) {
311                 RTE_LOG(ERR, USER1,
312                         "Parsing one or more user options failed\n");
313                 ret = EXIT_FAILURE;
314                 goto end;
315         }
316
317         if (comp_perf_options_check(test_data) < 0) {
318                 ret = EXIT_FAILURE;
319                 goto end;
320         }
321
322         nb_compressdevs =
323                 comp_perf_initialize_compressdev(test_data, enabled_cdevs);
324
325         if (nb_compressdevs < 1) {
326                 ret = EXIT_FAILURE;
327                 goto end;
328         }
329
330         test_data->cleanup = ST_COMPDEV;
331         if (comp_perf_dump_input_data(test_data) < 0) {
332                 ret = EXIT_FAILURE;
333                 goto end;
334         }
335
336         test_data->cleanup = ST_INPUT_DATA;
337
338         if (test_data->level_lst.inc != 0)
339                 test_data->level = test_data->level_lst.min;
340         else
341                 test_data->level = test_data->level_lst.list[0];
342
343         printf("App uses socket: %u\n", rte_socket_id());
344         printf("Burst size = %u\n", test_data->burst_sz);
345         printf("File size = %zu\n", test_data->input_data_sz);
346
347         test_data->cleanup = ST_DURING_TEST;
348         total_nb_qps = nb_compressdevs * test_data->nb_qps;
349
350         i = 0;
351         uint8_t qp_id = 0, cdev_index = 0;
352
353         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
354
355                 if (i == total_nb_qps)
356                         break;
357
358                 cdev_id = enabled_cdevs[cdev_index];
359                 ctx[i] = cperf_testmap[test_data->test].constructor(
360                                                         cdev_id, qp_id,
361                                                         test_data);
362                 if (ctx[i] == NULL) {
363                         RTE_LOG(ERR, USER1, "Test run constructor failed\n");
364                         goto end;
365                 }
366                 qp_id = (qp_id + 1) % test_data->nb_qps;
367                 if (qp_id == 0)
368                         cdev_index++;
369                 i++;
370         }
371
372         while (test_data->level <= test_data->level_lst.max) {
373
374                 i = 0;
375                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
376
377                         if (i == total_nb_qps)
378                                 break;
379
380                         rte_eal_remote_launch(
381                                         cperf_testmap[test_data->test].runner,
382                                         ctx[i], lcore_id);
383                         i++;
384                 }
385                 i = 0;
386                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
387
388                         if (i == total_nb_qps)
389                                 break;
390                         ret |= rte_eal_wait_lcore(lcore_id);
391                         i++;
392                 }
393
394                 if (ret != EXIT_SUCCESS)
395                         break;
396
397                 if (test_data->level_lst.inc != 0)
398                         test_data->level += test_data->level_lst.inc;
399                 else {
400                         if (++level_idx == test_data->level_lst.count)
401                                 break;
402                         test_data->level = test_data->level_lst.list[level_idx];
403                 }
404         }
405
406 end:
407         switch (test_data->cleanup) {
408
409         case ST_DURING_TEST:
410                 i = 0;
411                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
412                         if (i == total_nb_qps)
413                                 break;
414
415                         if (ctx[i] && cperf_testmap[test_data->test].destructor)
416                                 cperf_testmap[test_data->test].destructor(
417                                                                         ctx[i]);
418                         i++;
419                 }
420                 /* fallthrough */
421         case ST_INPUT_DATA:
422                 rte_free(test_data->input_data);
423                 /* fallthrough */
424         case ST_COMPDEV:
425                 for (i = 0; i < nb_compressdevs &&
426                                 i < RTE_COMPRESS_MAX_DEVS; i++)
427                         rte_compressdev_stop(enabled_cdevs[i]);
428                 /* fallthrough */
429         case ST_TEST_DATA:
430                 rte_free(test_data);
431                 /* fallthrough */
432         case ST_CLEAR:
433         default:
434                 i = rte_eal_cleanup();
435                 if (i) {
436                         RTE_LOG(ERR, USER1,
437                                 "Error from rte_eal_cleanup(), %d\n", i);
438                         ret = i;
439                 }
440                 break;
441         }
442         return ret;
443 }
444
445 __rte_weak void *
446 cperf_benchmark_test_constructor(uint8_t dev_id __rte_unused,
447                                  uint16_t qp_id __rte_unused,
448                                  struct comp_test_data *options __rte_unused)
449 {
450         RTE_LOG(INFO, USER1, "Benchmark test is not supported yet\n");
451         return NULL;
452 }
453
454 __rte_weak void
455 cperf_benchmark_test_destructor(void *arg __rte_unused)
456 {
457
458 }
459
460 __rte_weak int
461 cperf_benchmark_test_runner(void *test_ctx __rte_unused)
462 {
463         return 0;
464 }
465 __rte_weak void *
466 cperf_verify_test_constructor(uint8_t dev_id __rte_unused,
467                                  uint16_t qp_id __rte_unused,
468                                  struct comp_test_data *options __rte_unused)
469 {
470         RTE_LOG(INFO, USER1, "Verify test is not supported yet\n");
471         return NULL;
472 }
473
474 __rte_weak void
475 cperf_verify_test_destructor(void *arg __rte_unused)
476 {
477
478 }
479
480 __rte_weak int
481 cperf_verify_test_runner(void *test_ctx __rte_unused)
482 {
483         return 0;
484 }