* Copyright(c) 2018 Intel Corporation
*/
+#include <signal.h>
+#include <sys/types.h>
+#include <unistd.h>
+
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_compressdev.h>
-#include "comp_perf_options.h"
-#include "comp_perf_test_verify.h"
#include "comp_perf.h"
+#include "comp_perf_options.h"
#include "comp_perf_test_common.h"
+#include "comp_perf_test_cyclecount.h"
+#include "comp_perf_test_throughput.h"
+#include "comp_perf_test_verify.h"
#define NUM_MAX_XFORMS 16
#define NUM_MAX_INFLIGHT_OPS 512
__extension__
-const char *cperf_test_type_strs[] = {
-	[CPERF_TEST_TYPE_BENCHMARK] = "benchmark",
-	[CPERF_TEST_TYPE_VERIFY] = "verify"
+/* Human-readable test names, indexed by the cperf test-type enum. */
+const char *comp_perf_test_type_strs[] = {
+	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
+	[CPERF_TEST_TYPE_VERIFY] = "verify",
+	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};
__extension__
static const struct cperf_test cperf_testmap[] = {
+	/* One constructor/runner/destructor triplet per test type. */
-	[CPERF_TEST_TYPE_BENCHMARK] = {
-		cperf_benchmark_test_constructor,
-		cperf_benchmark_test_runner,
-		cperf_benchmark_test_destructor
+	[CPERF_TEST_TYPE_THROUGHPUT] = {
+		cperf_throughput_test_constructor,
+		cperf_throughput_test_runner,
+		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
+	},
+	[CPERF_TEST_TYPE_PMDCC] = {
+		cperf_cyclecount_test_constructor,
+		cperf_cyclecount_test_runner,
+		cperf_cyclecount_test_destructor
	}
};
+/* File scope so comp_perf_cleanup_on_signal() can flag a stop request. */
+static struct comp_test_data *test_data;
+
static int
comp_perf_check_capabilities(struct comp_test_data *test_data, uint8_t cdev_id)
{
enabled_cdev_count = rte_compressdev_devices_get(test_data->driver_name,
enabled_cdevs, RTE_COMPRESS_MAX_DEVS);
if (enabled_cdev_count == 0) {
- RTE_LOG(ERR, USER1, "No compress devices type %s available\n",
+ RTE_LOG(ERR, USER1, "No compress devices type %s available,"
+ " please check the list of specified devices in EAL section\n",
test_data->driver_name);
return -EINVAL;
}
* if there are more available than cores.
*/
if (enabled_cdev_count > nb_lcores) {
+ if (nb_lcores == 0) {
+ RTE_LOG(ERR, USER1, "Cannot run with 0 cores! Increase the number of cores\n");
+ return -EINVAL;
+ }
enabled_cdev_count = nb_lcores;
RTE_LOG(INFO, USER1,
- " There's more available devices than cores!"
+ "There's more available devices than cores!"
" The number of devices has been aligned to %d cores\n",
nb_lcores);
}
data += data_to_read;
}
+ printf("\n");
if (test_data->input_data_sz > actual_file_sz)
RTE_LOG(INFO, USER1,
"%zu bytes read from file %s, extending the file %.2f times\n",
return ret;
}
+/*
+ * SIGTERM/SIGINT handler: ask the running test to stop gracefully by
+ * raising the shared stop flag read by the worker loops.
+ * NOTE(review): for strict async-signal-safety perf_comp_force_stop
+ * should be a volatile sig_atomic_t (or atomic) — confirm its
+ * declaration in comp_perf.h/comp_perf_options.h.
+ */
+static void
+comp_perf_cleanup_on_signal(int signal_number __rte_unused)
+{
+	test_data->perf_comp_force_stop = 1;
+}
+
+/*
+ * Install comp_perf_cleanup_on_signal() for SIGTERM and SIGINT so an
+ * interrupted run stops the test loops instead of dying mid-test.
+ */
+static void
+comp_perf_register_cleanup_on_signal(void)
+{
+	signal(SIGTERM, comp_perf_cleanup_on_signal);
+	signal(SIGINT, comp_perf_cleanup_on_signal);
+}
+
int
main(int argc, char **argv)
{
uint8_t level_idx = 0;
int ret, i;
- struct comp_test_data *test_data;
void *ctx[RTE_MAX_LCORE] = {};
uint8_t enabled_cdevs[RTE_COMPRESS_MAX_DEVS];
int nb_compressdevs = 0;
rte_exit(EXIT_FAILURE, "Cannot reserve memory in socket %d\n",
rte_socket_id());
+ comp_perf_register_cleanup_on_signal();
+
ret = EXIT_SUCCESS;
test_data->cleanup = ST_TEST_DATA;
comp_perf_options_default(test_data);
else
test_data->level = test_data->level_lst.list[0];
- printf("App uses socket: %u\n", rte_socket_id());
+ printf("\nApp uses socket: %u\n", rte_socket_id());
printf("Burst size = %u\n", test_data->burst_sz);
- printf("File size = %zu\n", test_data->input_data_sz);
+ printf("Input data size = %zu\n", test_data->input_data_sz);
+ if (test_data->test == CPERF_TEST_TYPE_PMDCC)
+ printf("Cycle-count delay = %u [us]\n",
+ test_data->cyclecount_delay);
test_data->cleanup = ST_DURING_TEST;
total_nb_qps = nb_compressdevs * test_data->nb_qps;
i++;
}
+ print_test_dynamics(test_data);
+
while (test_data->level <= test_data->level_lst.max) {
i = 0;
/* fallthrough */
case ST_COMPDEV:
for (i = 0; i < nb_compressdevs &&
- i < RTE_COMPRESS_MAX_DEVS; i++)
+ i < RTE_COMPRESS_MAX_DEVS; i++) {
rte_compressdev_stop(enabled_cdevs[i]);
+ rte_compressdev_close(enabled_cdevs[i]);
+ }
/* fallthrough */
case ST_TEST_DATA:
rte_free(test_data);
}
+/*
+ * Weak fallback, overridden when the real cycle-count test is linked
+ * in: reports the test as unsupported and returns no context so the
+ * test is skipped.
+ */
__rte_weak void *
-cperf_benchmark_test_constructor(uint8_t dev_id __rte_unused,
+cperf_cyclecount_test_constructor(uint8_t dev_id __rte_unused,
+	uint16_t qp_id __rte_unused,
+	struct comp_test_data *options __rte_unused)
+{
+	RTE_LOG(INFO, USER1, "Cycle count test is not supported yet\n");
+	return NULL;
+}
+
+/*
+ * Weak fallback destructor for the cycle-count test. The weak
+ * constructor returns NULL, so this should never run; reaching it
+ * indicates an inconsistent build, hence the error-style log.
+ */
+__rte_weak void
+cperf_cyclecount_test_destructor(void *arg __rte_unused)
+{
+	RTE_LOG(INFO, USER1, "Something wrong happened!!!\n");
+}
+
+/* Weak fallback runner for the cycle-count test: no-op, reports success. */
+__rte_weak int
+cperf_cyclecount_test_runner(void *test_ctx __rte_unused)
+{
+	return 0;
+}
+
+__rte_weak void *
+cperf_throughput_test_constructor(uint8_t dev_id __rte_unused,
uint16_t qp_id __rte_unused,
struct comp_test_data *options __rte_unused)
{
}
__rte_weak void
-cperf_benchmark_test_destructor(void *arg __rte_unused)
+cperf_throughput_test_destructor(void *arg __rte_unused)
{
}
__rte_weak int
-cperf_benchmark_test_runner(void *test_ctx __rte_unused)
+cperf_throughput_test_runner(void *test_ctx __rte_unused)
{
return 0;
}