+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
#include <stdio.h>
#include <unistd.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#include <rte_cryptodev_scheduler.h>
+#endif
#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
+#include "cperf_test_verify.h"
+#include "cperf_test_pmd_cyclecount.h"
+
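+/* Per-socket session mempool sizing: total elements and per-lcore cache */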
+#define NUM_SESSIONS 2048
+#define SESS_MEMPOOL_CACHE_SIZE 64
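+
+/*
+ * Test type names accepted on the command line (via --ptest), e.g.:
+ *   --ptest throughput | latency | verify | pmd-cyclecount
+ */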
const char *cperf_test_type_strs[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
- [CPERF_TEST_TYPE_CYCLECOUNT] = "cycle-count",
- [CPERF_TEST_TYPE_LATENCY] = "latency"
+ [CPERF_TEST_TYPE_LATENCY] = "latency",
+ [CPERF_TEST_TYPE_VERIFY] = "verify",
+ [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};

const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
cperf_throughput_test_runner,
cperf_throughput_test_destructor
},
- [CPERF_TEST_TYPE_CYCLECOUNT] = { NULL },
[CPERF_TEST_TYPE_LATENCY] = {
cperf_latency_test_constructor,
cperf_latency_test_runner,
cperf_latency_test_destructor
+ },
+ [CPERF_TEST_TYPE_VERIFY] = {
+ cperf_verify_test_constructor,
+ cperf_verify_test_runner,
+ cperf_verify_test_destructor
+ },
+ [CPERF_TEST_TYPE_PMDCC] = {
+ cperf_pmd_cyclecount_test_constructor,
+ cperf_pmd_cyclecount_test_runner,
+ cperf_pmd_cyclecount_test_destructor
}
};
static int
-cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
+cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
+ struct rte_mempool *session_pool_socket[])
{
- uint8_t cdev_id, enabled_cdev_count = 0, nb_lcores;
+ uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
+ unsigned int i, j;
int ret;
enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
- enabled_cdevs, RTE_DIM(enabled_cdevs));
+ enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
if (enabled_cdev_count == 0) {
printf("No crypto devices type %s available\n",
opts->device_type);
return -EINVAL;
}
- for (cdev_id = 0; cdev_id < enabled_cdev_count &&
- cdev_id < RTE_CRYPTO_MAX_DEVS; cdev_id++) {
+	/*
+	 * Size the per-socket session mempools (created below) to fit
+	 * the largest private session size of any crypto device.
+	 */
+ uint32_t max_sess_size = 0, sess_size;
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_size = rte_cryptodev_get_private_session_size(cdev_id);
+ if (sess_size > max_sess_size)
+ max_sess_size = sess_size;
+ }
+
+	nb_lcores = rte_lcore_count() - 1;
+	if (nb_lcores < 1) {
+		RTE_LOG(ERR, USER1, "At least one worker lcore is needed\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Calculate the number of queue pairs needed, based on the
+	 * number of available logical cores and crypto devices. For
+	 * instance, with 4 cores and 2 crypto devices, 2 queue pairs
+	 * are set up per device.
+	 */
+	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
+			(nb_lcores / enabled_cdev_count) + 1 :
+			nb_lcores / enabled_cdev_count;
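+	/*
+	 * The division rounds up, so e.g. 5 cores on 2 devices get
+	 * 3 queue pairs each and no worker core is left without one.
+	 */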
+
+ for (i = 0; i < enabled_cdev_count &&
+ i < RTE_CRYPTO_MAX_DEVS; i++) {
+ cdev_id = enabled_cdevs[i];
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ /*
+ * If multi-core scheduler is used, limit the number
+ * of queue pairs to 1, as there is no way to know
+ * how many cores are being used by the PMD, and
+ * how many will be available for the application.
+ */
+		if (!strcmp((const char *)opts->device_type,
+				"crypto_scheduler") &&
+ rte_cryptodev_scheduler_mode_get(cdev_id) ==
+ CDEV_SCHED_MODE_MULTICORE)
+ opts->nb_qps = 1;
+#endif
+
+ struct rte_cryptodev_info cdev_info;
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+
+ rte_cryptodev_info_get(cdev_id, &cdev_info);
+ if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
+ printf("Number of needed queue pairs is higher "
+ "than the maximum number of queue pairs "
+ "per device.\n");
+ printf("Lower the number of cores or increase "
+ "the number of crypto devices\n");
+ return -EINVAL;
+ }
struct rte_cryptodev_config conf = {
- .nb_queue_pairs = 1,
- .socket_id = SOCKET_ID_ANY,
- .session_mp = {
- .nb_objs = 2048,
- .cache_size = 64
- }
- };
+ .nb_queue_pairs = opts->nb_qps,
+ .socket_id = socket_id
+ };
+
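+		/* Descriptor ring depth per queue pair, taken from options */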
struct rte_cryptodev_qp_conf qp_conf = {
- .nb_descriptors = 2048
+ .nb_descriptors = opts->nb_descriptors
};
- ret = rte_cryptodev_configure(enabled_cdevs[cdev_id], &conf);
+ if (session_pool_socket[socket_id] == NULL) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+
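+			/*
+			 * One pool serves every device and queue pair
+			 * residing on this socket.
+			 */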
+ sess_mp = rte_mempool_create(mp_name,
+ NUM_SESSIONS,
+ max_sess_size,
+ SESS_MEMPOOL_CACHE_SIZE,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+
+ if (sess_mp == NULL) {
+ printf("Cannot create session pool on socket %d\n",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ printf("Allocated session pool on socket %d\n", socket_id);
+ session_pool_socket[socket_id] = sess_mp;
+ }
+
+ ret = rte_cryptodev_configure(cdev_id, &conf);
if (ret < 0) {
- printf("Failed to configure cryptodev %u",
- enabled_cdevs[cdev_id]);
+		printf("Failed to configure cryptodev %u\n", cdev_id);
return -EINVAL;
}
-	ret = rte_cryptodev_queue_pair_setup(enabled_cdevs[cdev_id], 0,
-			&qp_conf, SOCKET_ID_ANY);
-	if (ret < 0) {
-		printf("Failed to setup queue pair %u on "
-				"cryptodev %u", 0, cdev_id);
-		return -EINVAL;
-	}
-	ret = rte_cryptodev_start(enabled_cdevs[cdev_id]);
-	if (ret < 0) {
-		printf("Failed to start device %u: error %d\n",
-				enabled_cdevs[cdev_id], ret);
-		return -EPERM;
-	}
+	for (j = 0; j < opts->nb_qps; j++) {
+		ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
+				&qp_conf, socket_id,
+				session_pool_socket[socket_id]);
+		if (ret < 0) {
+			printf("Failed to setup queue pair %u on "
+					"cryptodev %u\n", j, cdev_id);
+			return -EINVAL;
+		}
+	}
+
+	ret = rte_cryptodev_start(cdev_id);
+	if (ret < 0) {
+		printf("Failed to start device %u: error %d\n",
+				cdev_id, ret);
+		return -EPERM;
+	}
}
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
cap_idx.algo.auth = opts->auth_algo;
ret = rte_cryptodev_sym_capability_check_auth(
capability,
opts->auth_key_sz,
- opts->auth_digest_sz,
- opts->auth_aad_sz);
+ opts->digest_sz,
+ opts->auth_iv_sz);
if (ret != 0)
return ret;
}
if (ret != 0)
return ret;
}
+
+ if (opts->op_type == CPERF_AEAD) {
+
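+		/*
+		 * AEAD combines cipher and auth, so key, digest, AAD
+		 * and IV sizes must all be supported together.
+		 */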
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ cap_idx.algo.aead = opts->aead_algo;
+
+ capability = rte_cryptodev_sym_capability_get(cdev_id,
+ &cap_idx);
+ if (capability == NULL)
+ return -1;
+
+ ret = rte_cryptodev_sym_capability_check_aead(
+ capability,
+ opts->aead_key_sz,
+ opts->digest_sz,
+ opts->aead_aad_sz,
+ opts->aead_iv_sz);
+ if (ret != 0)
+ return ret;
+ }
}
return 0;
} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
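+		/* The vector need only cover the largest buffer size tested */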
+ if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
if (test_vec->ciphertext.data == NULL)
return -1;
- if (test_vec->ciphertext.length != opts->buffer_sz)
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->iv.data == NULL)
+ if (test_vec->cipher_iv.data == NULL)
return -1;
- if (test_vec->iv.length != opts->cipher_iv_sz)
+ if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
if (test_vec->auth_key.data == NULL)
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
+ if (test_vec->auth_iv.length != opts->auth_iv_sz)
+ return -1;
+ /* Auth IV is only required for some algorithms */
+ if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
+ return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length != opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
if (test_vec->ciphertext.data == NULL)
return -1;
- if (test_vec->ciphertext.length != opts->buffer_sz)
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->iv.data == NULL)
+ if (test_vec->cipher_iv.data == NULL)
return -1;
- if (test_vec->iv.length != opts->cipher_iv_sz)
+ if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
		if (test_vec->auth_key.data == NULL)
			return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
+ if (test_vec->auth_iv.length != opts->auth_iv_sz)
+ return -1;
+ /* Auth IV is only required for some algorithms */
+ if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
+ return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length != opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
} else if (opts->op_type == CPERF_AEAD) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->ciphertext.data == NULL)
+ return -1;
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->aead_iv.data == NULL)
+ return -1;
+ if (test_vec->aead_iv.length != opts->aead_iv_sz)
return -1;
if (test_vec->aad.data == NULL)
return -1;
- if (test_vec->aad.length != opts->auth_aad_sz)
+ if (test_vec->aad.length != opts->aead_aad_sz)
return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length != opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
return 0;
struct cperf_op_fns op_fns;
void *ctx[RTE_MAX_LCORE] = { };
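+	/* One session mempool per NUMA socket, created on first use */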
+ struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
- int nb_cryptodevs;
+ int nb_cryptodevs = 0;
+ uint16_t total_nb_qps = 0;
uint8_t cdev_id, i;
uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
+ uint8_t buffer_size_idx = 0;
+
int ret;
uint32_t lcore_id;
goto err;
}
+ nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
+ session_pool_socket);
+
if (!opts.silent)
cperf_options_dump(&opts);
- nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
if (nb_cryptodevs < 1) {
RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
"device type\n");
+ nb_cryptodevs = 0;
goto err;
}
if (!opts.silent)
show_test_vector(t_vec);
+ total_nb_qps = nb_cryptodevs * opts.nb_qps;
+
i = 0;
+ uint8_t qp_id = 0, cdev_index = 0;
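+	/* Assign each worker lcore its own (device, queue pair) context */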
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
+ cdev_id = enabled_cdevs[cdev_index];
- ctx[cdev_id] = cperf_testmap[opts.test].constructor(cdev_id, 0,
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+
+ ctx[i] = cperf_testmap[opts.test].constructor(
+ session_pool_socket[socket_id], cdev_id, qp_id,
&opts, t_vec, &op_fns);
- if (ctx[cdev_id] == NULL) {
+ if (ctx[i] == NULL) {
RTE_LOG(ERR, USER1, "Test run constructor failed\n");
goto err;
}
+ qp_id = (qp_id + 1) % opts.nb_qps;
+ if (qp_id == 0)
+ cdev_index++;
i++;
}
-	i = 0;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-		if (i == nb_cryptodevs)
-			break;
-		cdev_id = enabled_cdevs[i];
-		rte_eal_remote_launch(cperf_testmap[opts.test].runner,
-				ctx[cdev_id], lcore_id);
-		i++;
-	}
-	rte_eal_mp_wait_lcore();
+	if (opts.imix_distribution_count != 0) {
+		uint8_t buffer_size_count = opts.buffer_size_count;
+		uint16_t distribution_total[buffer_size_count];
+		uint32_t op_idx;
+		uint32_t test_average_size = 0;
+		const uint32_t *buffer_size_list = opts.buffer_size_list;
+		const uint32_t *imix_distribution_list =
+				opts.imix_distribution_list;
+
+		opts.imix_buffer_sizes = rte_malloc(NULL,
+					sizeof(uint32_t) * opts.pool_sz,
+					0);
+		if (opts.imix_buffer_sizes == NULL) {
+			RTE_LOG(ERR, USER1,
+				"Cannot allocate IMIX buffer sizes\n");
+			goto err;
+		}
+
+		/*
+		 * Calculate accumulated distribution of
+		 * probabilities per packet size
+		 */
+		distribution_total[0] = imix_distribution_list[0];
+		for (i = 1; i < buffer_size_count; i++)
+			distribution_total[i] = imix_distribution_list[i] +
+					distribution_total[i-1];
+
+		/*
+		 * Calculate a random sequence of packet sizes,
+		 * based on the distribution
+		 */
+		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
+			uint16_t random_number = rte_rand() %
+				distribution_total[buffer_size_count - 1];
+			for (i = 0; i < buffer_size_count; i++)
+				if (random_number < distribution_total[i])
+					break;
+
+			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
+		}
+
+		/* Calculate average buffer size for the IMIX distribution */
+		for (i = 0; i < buffer_size_count; i++)
+			test_average_size += buffer_size_list[i] *
+				imix_distribution_list[i];
+
+		opts.test_buffer_size = test_average_size /
+				distribution_total[buffer_size_count - 1];
+
+		i = 0;
+		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+			if (i == total_nb_qps)
+				break;
+
+			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
+					ctx[i], lcore_id);
+			i++;
+		}
+		i = 0;
+		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+			if (i == total_nb_qps)
+				break;
+			rte_eal_wait_lcore(lcore_id);
+			i++;
+		}
+ } else {
+
+ /* Get next size from range or list */
+ if (opts.inc_buffer_size != 0)
+ opts.test_buffer_size = opts.min_buffer_size;
+ else
+ opts.test_buffer_size = opts.buffer_size_list[0];
+
+ while (opts.test_buffer_size <= opts.max_buffer_size) {
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == total_nb_qps)
+ break;
+
+ rte_eal_remote_launch(cperf_testmap[opts.test].runner,
+ ctx[i], lcore_id);
+ i++;
+ }
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == total_nb_qps)
+ break;
+ rte_eal_wait_lcore(lcore_id);
+ i++;
+ }
+
+ /* Get next size from range or list */
+ if (opts.inc_buffer_size != 0)
+ opts.test_buffer_size += opts.inc_buffer_size;
+ else {
+ if (++buffer_size_idx == opts.buffer_size_count)
+ break;
+ opts.test_buffer_size =
+ opts.buffer_size_list[buffer_size_idx];
+ }
+ }
+ }
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
-
- cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ cperf_testmap[opts.test].destructor(ctx[i]);
i++;
}
+ for (i = 0; i < nb_cryptodevs &&
+ i < RTE_CRYPTO_MAX_DEVS; i++)
+ rte_cryptodev_stop(enabled_cdevs[i]);
+
free_test_vector(t_vec, &opts);
printf("\n");
err:
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
-
- if (ctx[cdev_id] && cperf_testmap[opts.test].destructor)
- cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ if (ctx[i] && cperf_testmap[opts.test].destructor)
+ cperf_testmap[opts.test].destructor(ctx[i]);
i++;
}
+ for (i = 0; i < nb_cryptodevs &&
+ i < RTE_CRYPTO_MAX_DEVS; i++)
+ rte_cryptodev_stop(enabled_cdevs[i]);
+ rte_free(opts.imix_buffer_sizes);
free_test_vector(t_vec, &opts);
printf("\n");