sym_op->m_dst = bufs_out[i];
/* cipher parameters */
- sym_op->cipher.data.length = options->buffer_sz;
+ sym_op->cipher.data.length = options->test_buffer_size;
sym_op->cipher.data.offset = 0;
}
sym_op->m_dst = bufs_out[i];
/* auth parameters */
- sym_op->auth.data.length = options->buffer_sz;
+ sym_op->auth.data.length = options->test_buffer_size;
sym_op->auth.data.offset = 0;
}
sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
sym_op->cipher.iv.length = test_vector->iv.length;
- sym_op->cipher.data.length = options->buffer_sz;
+ sym_op->cipher.data.length = options->test_buffer_size;
sym_op->cipher.data.offset = 0;
}
sym_op->auth.digest.length = options->auth_digest_sz;
} else {
- uint32_t offset = options->buffer_sz;
+ uint32_t offset = options->test_buffer_size;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
}
- sym_op->auth.data.length = options->buffer_sz;
+ sym_op->auth.data.length = options->test_buffer_size;
sym_op->auth.data.offset = 0;
}
sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
sym_op->cipher.iv.length = test_vector->iv.length;
- sym_op->cipher.data.length = options->buffer_sz;
+ sym_op->cipher.data.length = options->test_buffer_size;
sym_op->cipher.data.offset = 0;
/* authentication parameters */
sym_op->auth.digest.length = options->auth_digest_sz;
} else {
- uint32_t offset = options->buffer_sz;
+ uint32_t offset = options->test_buffer_size;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
sym_op->auth.aad.length = options->auth_aad_sz;
}
- sym_op->auth.data.length = options->buffer_sz;
+ sym_op->auth.data.length = options->test_buffer_size;
sym_op->auth.data.offset = 0;
}
sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
sym_op->cipher.iv.length = test_vector->iv.length;
- sym_op->cipher.data.length = options->buffer_sz;
+ sym_op->cipher.data.length = options->test_buffer_size;
sym_op->cipher.data.offset =
RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
sym_op->auth.digest.length = options->auth_digest_sz;
}
- sym_op->auth.data.length = options->buffer_sz;
+ sym_op->auth.data.length = options->test_buffer_size;
sym_op->auth.data.offset = options->auth_aad_sz;
}
#define CPERF_AUTH_AAD_SZ ("auth-aad-sz")
#define CPERF_CSV ("csv-friendly")
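+/* Maximum number of entries accepted in a --buffer-sz/--burst-sz list */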
+#define MAX_LIST 32
enum cperf_perf_test_type {
CPERF_TEST_TYPE_THROUGHPUT,
uint32_t pool_sz;
uint32_t total_ops;
- uint32_t burst_sz;
- uint32_t buffer_sz;
+ uint32_t test_buffer_size;
uint32_t segments_nb;
char device_type[RTE_CRYPTODEV_NAME_LEN];
uint16_t auth_key_sz;
uint16_t auth_digest_sz;
uint16_t auth_aad_sz;
+
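+ /*
+  * Buffer sizes to test, either as an explicit list or as a
+  * min/inc/max range (range mode applies when inc_buffer_size != 0).
+  */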
+ uint32_t buffer_size_list[MAX_LIST];
+ uint8_t buffer_size_count;
+ uint32_t max_buffer_size;
+ uint32_t min_buffer_size;
+ uint32_t inc_buffer_size;
+
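+ /* Burst sizes to test, using the same list/range convention. */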
+ uint32_t burst_size_list[MAX_LIST];
+ uint8_t burst_size_count;
+ uint32_t max_burst_size;
+ uint32_t min_burst_size;
+ uint32_t inc_burst_size;
};
void
return 0;
}
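+
+/*
+ * Parse a range of the form "min:inc:max" into its three components.
+ * Returns 0 on success, or -1 on malformed input, zero values or a
+ * maximum smaller than the minimum.
+ */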
+static int
+parse_range(const char *arg, uint32_t *min, uint32_t *max, uint32_t *inc)
+{
+ char *token;
+ uint32_t number;
+
+ char *copy_arg = strdup(arg);
+
+ if (copy_arg == NULL)
+ return -1;
+
+ token = strtok(copy_arg, ":");
+
+ /* Parse minimum value */
+ if (token != NULL) {
+ errno = 0;
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0)
+ goto err_range;
+
+ *min = number;
+ } else
+ goto err_range;
+
+ token = strtok(NULL, ":");
+
+ /* Parse increment value */
+ if (token != NULL) {
+ errno = 0;
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0)
+ goto err_range;
+
+ *inc = number;
+ } else
+ goto err_range;
+
+ token = strtok(NULL, ":");
+
+ /* Parse maximum value */
+ if (token != NULL) {
+ errno = 0;
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0 ||
+ number < *min)
+ goto err_range;
+
+ *max = number;
+ } else
+ goto err_range;
+
+ if (strtok(NULL, ":") != NULL)
+ goto err_range;
+
+ free(copy_arg);
+ return 0;
+
+err_range:
+ free(copy_arg);
+ return -1;
+}
+
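+/*
+ * Parse a comma-separated list of up to MAX_LIST sizes, tracking the
+ * smallest and largest values seen. Returns the number of entries
+ * parsed, or -1 on malformed input.
+ */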
+static int
+parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
+{
+ char *token;
+ uint32_t number;
+ uint8_t count = 0;
+
+ char *copy_arg = strdup(arg);
+
+ if (copy_arg == NULL)
+ return -1;
+
+ token = strtok(copy_arg, ",");
+
+ /* Parse first value */
+ if (token != NULL) {
+ errno = 0;
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0)
+ goto err_list;
+
+ list[count++] = number;
+ *min = number;
+ *max = number;
+ } else
+ goto err_list;
+
+ token = strtok(NULL, ",");
+
+ while (token != NULL) {
+ if (count == MAX_LIST) {
+ RTE_LOG(WARNING, USER1, "Using only the first %u sizes\n",
+ MAX_LIST);
+ break;
+ }
+
+ errno = 0;
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0)
+ goto err_list;
+
+ list[count++] = number;
+
+ if (number < *min)
+ *min = number;
+ if (number > *max)
+ *max = number;
+
+ token = strtok(NULL, ",");
+ }
+
+ free(copy_arg);
+ return count;
+
+err_list:
+ free(copy_arg);
+ return -1;
+}
+
static int
parse_total_ops(struct cperf_options *opts, const char *arg)
{
static int
parse_burst_sz(struct cperf_options *opts, const char *arg)
{
- int ret = parse_uint32_t(&opts->burst_sz, arg);
+ int ret;
+
+ /* Try parsing the argument as a range; if that fails, parse it as a list */
+ if (parse_range(arg, &opts->min_burst_size, &opts->max_burst_size,
+ &opts->inc_burst_size) < 0) {
+ ret = parse_list(arg, opts->burst_size_list,
+ &opts->min_burst_size,
+ &opts->max_burst_size);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "failed to parse burst size(s)\n");
+ return -1;
+ }
+ opts->burst_size_count = ret;
+ }
- if (ret)
- RTE_LOG(ERR, USER1, "failed to parse burst size");
- return ret;
+ return 0;
}
static int
parse_buffer_sz(struct cperf_options *opts, const char *arg)
{
- uint32_t i, valid_buf_sz[] = {
- 32, 64, 128, 256, 384, 512, 768, 1024, 1280, 1536, 1792,
- 2048
- };
-
- if (parse_uint32_t(&opts->buffer_sz, arg)) {
- RTE_LOG(ERR, USER1, "failed to parse buffer size");
- return -1;
+ int ret;
+
+ /* Try parsing the argument as a range; if that fails, parse it as a list */
+ if (parse_range(arg, &opts->min_buffer_size, &opts->max_buffer_size,
+ &opts->inc_buffer_size) < 0) {
+ ret = parse_list(arg, opts->buffer_size_list,
+ &opts->min_buffer_size,
+ &opts->max_buffer_size);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "failed to parse buffer size(s)\n");
+ return -1;
+ }
+ opts->buffer_size_count = ret;
}
- for (i = 0; i < RTE_DIM(valid_buf_sz); i++)
- if (valid_buf_sz[i] == opts->buffer_sz)
- return 0;
-
- RTE_LOG(ERR, USER1, "invalid buffer size specified");
- return -1;
+ return 0;
}
static int
opts->pool_sz = 8192;
opts->total_ops = 10000000;
- opts->burst_sz = 32;
- opts->buffer_sz = 64;
+
+ opts->buffer_size_list[0] = 64;
+ opts->buffer_size_count = 1;
+ opts->max_buffer_size = 64;
+ opts->min_buffer_size = 64;
+ opts->inc_buffer_size = 0;
+
+ opts->burst_size_list[0] = 32;
+ opts->burst_size_count = 1;
+ opts->max_burst_size = 32;
+ opts->min_burst_size = 32;
+ opts->inc_burst_size = 0;
+
opts->segments_nb = 1;
strncpy(opts->device_type, "crypto_aesni_mb",
int
cperf_options_check(struct cperf_options *options)
{
- if (options->segments_nb > options->buffer_sz) {
+ if (options->segments_nb > options->min_buffer_size) {
RTE_LOG(ERR, USER1,
"Segments number greater than buffer size.\n");
return -EINVAL;
return -EINVAL;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY &&
+ (options->inc_buffer_size != 0 ||
+ options->buffer_size_count > 1)) {
+ RTE_LOG(ERR, USER1, "Only one buffer size is allowed when "
+ "using the verify test.\n");
+ return -EINVAL;
+ }
+
+ if (options->test == CPERF_TEST_TYPE_VERIFY &&
+ (options->inc_burst_size != 0 ||
+ options->burst_size_count > 1)) {
+ RTE_LOG(ERR, USER1, "Only one burst size is allowed when "
+ "using the verify test.\n");
+ return -EINVAL;
+ }
+
if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
options->auth_op !=
void
cperf_options_dump(struct cperf_options *opts)
{
+ uint8_t size_idx;
+
printf("# Crypto Performance Application Options:\n");
printf("#\n");
printf("# cperf test: %s\n", cperf_test_type_strs[opts->test]);
printf("#\n");
printf("# size of crypto op / mbuf pool: %u\n", opts->pool_sz);
printf("# total number of ops: %u\n", opts->total_ops);
- printf("# burst size: %u\n", opts->burst_sz);
- printf("# buffer size: %u\n", opts->buffer_sz);
- printf("# segments per buffer: %u\n", opts->segments_nb);
+ if (opts->inc_buffer_size != 0) {
+ printf("# buffer size:\n");
+ printf("#\t min: %u\n", opts->min_buffer_size);
+ printf("#\t max: %u\n", opts->max_buffer_size);
+ printf("#\t inc: %u\n", opts->inc_buffer_size);
+ } else {
+ printf("# buffer sizes: ");
+ for (size_idx = 0; size_idx < opts->buffer_size_count; size_idx++)
+ printf("%u ", opts->buffer_size_list[size_idx]);
+ printf("\n");
+ }
+ if (opts->inc_burst_size != 0) {
+ printf("# burst size:\n");
+ printf("#\t min: %u\n", opts->min_burst_size);
+ printf("#\t max: %u\n", opts->max_burst_size);
+ printf("#\t inc: %u\n", opts->inc_burst_size);
+ } else {
+ printf("# burst sizes: ");
+ for (size_idx = 0; size_idx < opts->burst_size_count; size_idx++)
+ printf("%u ", opts->burst_size_list[size_idx]);
+ printf("\n");
+ }
+ printf("\n# segments per buffer: %u\n", opts->segments_nb);
printf("#\n");
printf("# cryptodev type: %s\n", opts->device_type);
printf("#\n");
const struct cperf_test_vector *test_vector)
{
struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->buffer_sz / segments_nb;
- uint32_t last_sz = options->buffer_sz % segments_nb;
+ uint32_t segment_sz = options->max_buffer_size / segments_nb;
+ uint32_t last_sz = options->max_buffer_size % segments_nb;
uint8_t *mbuf_data;
uint8_t *test_data =
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
options->pool_sz * options->segments_nb, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
- (options->buffer_sz / options->segments_nb) +
- (options->buffer_sz % options->segments_nb) +
+ (options->max_buffer_size / options->segments_nb) +
+ (options->max_buffer_size % options->segments_nb) +
options->auth_digest_sz),
rte_socket_id());
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
- options->buffer_sz +
+ options->max_buffer_size +
options->auth_digest_sz),
rte_socket_id());
{
struct cperf_latency_ctx *ctx = arg;
struct cperf_op_result *pres;
+ uint16_t test_burst_size;
+ uint8_t burst_size_idx = 0;
static int only_once;
if (ctx == NULL)
return 0;
- struct rte_crypto_op *ops[ctx->options->burst_sz];
- struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+ struct rte_crypto_op *ops[ctx->options->max_burst_size];
+ struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
uint64_t i;
uint32_t lcore = rte_lcore_id();
for (i = 0; i < ctx->options->total_ops; i++)
rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
- uint64_t ops_enqd = 0, ops_deqd = 0;
- uint64_t m_idx = 0, b_idx = 0;
+ /* Get first size from range or list */
+ if (ctx->options->inc_burst_size != 0)
+ test_burst_size = ctx->options->min_burst_size;
+ else
+ test_burst_size = ctx->options->burst_size_list[0];
- uint64_t tsc_val, tsc_end, tsc_start;
- uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
- uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
- uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
+ while (test_burst_size <= ctx->options->max_burst_size) {
+ uint64_t ops_enqd = 0, ops_deqd = 0;
+ uint64_t m_idx = 0, b_idx = 0;
- while (enqd_tot < ctx->options->total_ops) {
- uint16_t burst_size = ((enqd_tot + ctx->options->burst_sz)
- <= ctx->options->total_ops) ?
- ctx->options->burst_sz :
- ctx->options->total_ops -
- enqd_tot;
+ uint64_t tsc_val, tsc_end, tsc_start;
+ uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
+ uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
+ uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
- /* Allocate crypto ops from pool */
- if (burst_size != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size))
- return -1;
+ while (enqd_tot < ctx->options->total_ops) {
- /* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
- burst_size, ctx->sess, ctx->options,
- ctx->test_vector);
+ uint16_t burst_size = ((enqd_tot + test_burst_size)
+ <= ctx->options->total_ops) ?
+ test_burst_size :
+ ctx->options->total_ops -
+ enqd_tot;
- tsc_start = rte_rdtsc_precise();
+ /* Allocate crypto ops from pool */
+ if (burst_size != rte_crypto_op_bulk_alloc(
+ ctx->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops, burst_size))
+ return -1;
+
+ /* Setup crypto op, attach mbuf etc */
+ (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+ &ctx->mbufs_out[m_idx],
+ burst_size, ctx->sess, ctx->options,
+ ctx->test_vector);
+
+ tsc_start = rte_rdtsc_precise();
#ifdef CPERF_LINEARIZATION_ENABLE
- if (linearize) {
- /* PMD doesn't support scatter-gather and source buffer
- * is segmented.
- * We need to linearize it before enqueuing.
- */
- for (i = 0; i < burst_size; i++)
- rte_pktmbuf_linearize(ops[i]->sym->m_src);
- }
+ if (linearize) {
+ /* PMD doesn't support scatter-gather and source buffer
+ * is segmented.
+ * We need to linearize it before enqueuing.
+ */
+ for (i = 0; i < burst_size; i++)
+ rte_pktmbuf_linearize(ops[i]->sym->m_src);
+ }
#endif /* CPERF_LINEARIZATION_ENABLE */
- /* Enqueue burst of ops on crypto device */
- ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
- ops, burst_size);
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ ops, burst_size);
- /* Dequeue processed burst of ops from crypto device */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->burst_sz);
+ /* Dequeue processed burst of ops from crypto device */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
- tsc_end = rte_rdtsc_precise();
+ tsc_end = rte_rdtsc_precise();
- for (i = 0; i < ops_enqd; i++) {
- ctx->res[tsc_idx].tsc_start = tsc_start;
- ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
- tsc_idx++;
- }
+ /* Free memory for not enqueued operations */
+ for (i = ops_enqd; i < burst_size; i++)
+ rte_crypto_op_free(ops[i]);
- /* Free memory for not enqueued operations */
- for (i = ops_enqd; i < burst_size; i++)
- rte_crypto_op_free(ops[i]);
-
- if (likely(ops_deqd)) {
- /*
- * free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- for (i = 0; i < ops_deqd; i++) {
- pres = (struct cperf_op_result *)
- (ops_processed[i]->opaque_data);
- pres->status = ops_processed[i]->status;
- pres->tsc_end = tsc_end;
-
- rte_crypto_op_free(ops_processed[i]);
+ for (i = 0; i < ops_enqd; i++) {
+ ctx->res[tsc_idx].tsc_start = tsc_start;
+ ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
+ tsc_idx++;
}
- deqd_tot += ops_deqd;
- deqd_max = max(ops_deqd, deqd_max);
- deqd_min = min(ops_deqd, deqd_min);
- }
+ if (likely(ops_deqd)) {
+ /*
+ * free crypto ops so they can be reused. We don't free
+ * the mbufs here as we don't want to reuse them as
+ * the crypto operation will change the data and cause
+ * failures.
+ */
+ for (i = 0; i < ops_deqd; i++) {
+ pres = (struct cperf_op_result *)
+ (ops_processed[i]->opaque_data);
+ pres->status = ops_processed[i]->status;
+ pres->tsc_end = tsc_end;
+
+ rte_crypto_op_free(ops_processed[i]);
+ }
+
+ deqd_tot += ops_deqd;
+ deqd_max = max(ops_deqd, deqd_max);
+ deqd_min = min(ops_deqd, deqd_min);
+ }
- enqd_tot += ops_enqd;
- enqd_max = max(ops_enqd, enqd_max);
- enqd_min = min(ops_enqd, enqd_min);
+ enqd_tot += ops_enqd;
+ enqd_max = max(ops_enqd, enqd_max);
+ enqd_min = min(ops_enqd, enqd_min);
- m_idx += ops_enqd;
- m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
- 0 : m_idx;
- b_idx++;
- }
+ m_idx += ops_enqd;
+ m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
+ 0 : m_idx;
+ b_idx++;
+ }
- /* Dequeue any operations still in the crypto device */
- while (deqd_tot < ctx->options->total_ops) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+ /* Dequeue any operations still in the crypto device */
+ while (deqd_tot < ctx->options->total_ops) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
- /* dequeue burst */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->burst_sz);
+ tsc_end = rte_rdtsc_precise();
- tsc_end = rte_rdtsc_precise();
+ if (ops_deqd != 0) {
+ for (i = 0; i < ops_deqd; i++) {
+ pres = (struct cperf_op_result *)
+ (ops_processed[i]->opaque_data);
+ pres->status = ops_processed[i]->status;
+ pres->tsc_end = tsc_end;
- if (ops_deqd != 0) {
- for (i = 0; i < ops_deqd; i++) {
- pres = (struct cperf_op_result *)
- (ops_processed[i]->opaque_data);
- pres->status = ops_processed[i]->status;
- pres->tsc_end = tsc_end;
+ rte_crypto_op_free(ops_processed[i]);
+ }
- rte_crypto_op_free(ops_processed[i]);
+ deqd_tot += ops_deqd;
+ deqd_max = max(ops_deqd, deqd_max);
+ deqd_min = min(ops_deqd, deqd_min);
}
+ }
- deqd_tot += ops_deqd;
- deqd_max = max(ops_deqd, deqd_max);
- deqd_min = min(ops_deqd, deqd_min);
+ for (i = 0; i < tsc_idx; i++) {
+ tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
+ tsc_max = max(tsc_val, tsc_max);
+ tsc_min = min(tsc_val, tsc_min);
+ tsc_tot += tsc_val;
}
- }
- for (i = 0; i < tsc_idx; i++) {
- tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
- tsc_max = max(tsc_val, tsc_max);
- tsc_min = min(tsc_val, tsc_min);
- tsc_tot += tsc_val;
- }
+ double time_tot, time_avg, time_max, time_min;
- double time_tot, time_avg, time_max, time_min;
+ const uint64_t tunit = 1000000; /* us */
+ const uint64_t tsc_hz = rte_get_tsc_hz();
- const uint64_t tunit = 1000000; /* us */
- const uint64_t tsc_hz = rte_get_tsc_hz();
+ uint64_t enqd_avg = enqd_tot / b_idx;
+ uint64_t deqd_avg = deqd_tot / b_idx;
+ uint64_t tsc_avg = tsc_tot / tsc_idx;
- uint64_t enqd_avg = enqd_tot / b_idx;
- uint64_t deqd_avg = deqd_tot / b_idx;
- uint64_t tsc_avg = tsc_tot / tsc_idx;
+ time_tot = tunit*(double)(tsc_tot) / tsc_hz;
+ time_avg = tunit*(double)(tsc_avg) / tsc_hz;
+ time_max = tunit*(double)(tsc_max) / tsc_hz;
+ time_min = tunit*(double)(tsc_min) / tsc_hz;
- time_tot = tunit*(double)(tsc_tot) / tsc_hz;
- time_avg = tunit*(double)(tsc_avg) / tsc_hz;
- time_max = tunit*(double)(tsc_max) / tsc_hz;
- time_min = tunit*(double)(tsc_min) / tsc_hz;
+ if (ctx->options->csv) {
+ if (!only_once)
+ printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
+ "Packet Size, cycles, time (us)");
- if (ctx->options->csv) {
- if (!only_once)
- printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
- "Packet Size, cycles, time (us)");
+ for (i = 0; i < ctx->options->total_ops; i++) {
- for (i = 0; i < ctx->options->total_ops; i++) {
+ printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f",
+ ctx->lcore_id, ctx->options->test_buffer_size,
+ test_burst_size, i + 1,
+ ctx->res[i].tsc_end - ctx->res[i].tsc_start,
+ tunit * (double) (ctx->res[i].tsc_end
+ - ctx->res[i].tsc_start)
+ / tsc_hz);
- printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f",
- ctx->lcore_id, ctx->options->buffer_sz,
- ctx->options->burst_sz, i + 1,
- ctx->res[i].tsc_end - ctx->res[i].tsc_start,
- tunit * (double) (ctx->res[i].tsc_end
- - ctx->res[i].tsc_start)
- / tsc_hz);
+ }
+ only_once = 1;
+ } else {
+ printf("\n# Device %d on lcore %u\n", ctx->dev_id,
+ ctx->lcore_id);
+ printf("\n# total operations: %u", ctx->options->total_ops);
+ printf("\n# Buffer size: %u", ctx->options->test_buffer_size);
+ printf("\n# Burst size: %u", test_burst_size);
+ printf("\n# Number of bursts: %"PRIu64,
+ b_idx);
+
+ printf("\n#");
+ printf("\n# \t Total\t Average\t "
+ "Maximum\t Minimum");
+ printf("\n# enqueued\t%12"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64"\t%10"PRIu64, enqd_tot,
+ enqd_avg, enqd_max, enqd_min);
+ printf("\n# dequeued\t%12"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64"\t%10"PRIu64, deqd_tot,
+ deqd_avg, deqd_max, deqd_min);
+ printf("\n# cycles\t%12"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64"\t%10"PRIu64, tsc_tot,
+ tsc_avg, tsc_max, tsc_min);
+ printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f",
+ time_tot, time_avg, time_max, time_min);
+ printf("\n\n");
}
- only_once = 1;
- } else {
- printf("\n# Device %d on lcore %u\n", ctx->dev_id,
- ctx->lcore_id);
- printf("\n# total operations: %u", ctx->options->total_ops);
- printf("\n# Buffer size: %u", ctx->options->buffer_sz);
- printf("\n# Burst size: %u", ctx->options->burst_sz);
- printf("\n# Number of bursts: %"PRIu64,
- b_idx);
-
- printf("\n#");
- printf("\n# \t Total\t Average\t "
- "Maximum\t Minimum");
- printf("\n# enqueued\t%12"PRIu64"\t%10"PRIu64"\t"
- "%10"PRIu64"\t%10"PRIu64, enqd_tot,
- enqd_avg, enqd_max, enqd_min);
- printf("\n# dequeued\t%12"PRIu64"\t%10"PRIu64"\t"
- "%10"PRIu64"\t%10"PRIu64, deqd_tot,
- deqd_avg, deqd_max, deqd_min);
- printf("\n# cycles\t%12"PRIu64"\t%10"PRIu64"\t"
- "%10"PRIu64"\t%10"PRIu64, tsc_tot,
- tsc_avg, tsc_max, tsc_min);
- printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f",
- time_tot, time_avg, time_max, time_min);
- printf("\n\n");
+ /* Get next size from range or list */
+ if (ctx->options->inc_burst_size != 0)
+ test_burst_size += ctx->options->inc_burst_size;
+ else {
+ if (++burst_size_idx == ctx->options->burst_size_count)
+ break;
+ test_burst_size =
+ ctx->options->burst_size_list[burst_size_idx];
+ }
}
return 0;
const struct cperf_test_vector *test_vector)
{
struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->buffer_sz / segments_nb;
- uint32_t last_sz = options->buffer_sz % segments_nb;
+ uint32_t segment_sz = options->max_buffer_size / segments_nb;
+ uint32_t last_sz = options->max_buffer_size % segments_nb;
uint8_t *mbuf_data;
uint8_t *test_data =
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
options->pool_sz * options->segments_nb, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
- (options->buffer_sz / options->segments_nb) +
- (options->buffer_sz % options->segments_nb) +
+ (options->max_buffer_size / options->segments_nb) +
+ (options->max_buffer_size % options->segments_nb) +
options->auth_digest_sz),
rte_socket_id());
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
- options->buffer_sz +
+ options->max_buffer_size +
options->auth_digest_sz),
rte_socket_id());
cperf_throughput_test_runner(void *test_ctx)
{
struct cperf_throughput_ctx *ctx = test_ctx;
+ uint16_t test_burst_size;
+ uint8_t burst_size_idx = 0;
static int only_once;
- struct rte_crypto_op *ops[ctx->options->burst_sz];
- struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+ struct rte_crypto_op *ops[ctx->options->max_burst_size];
+ struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
uint64_t i;
uint32_t lcore = rte_lcore_id();
for (i = 0; i < ctx->options->total_ops; i++)
rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
- uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
- uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
- uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
+ /* Get first size from range or list */
+ if (ctx->options->inc_burst_size != 0)
+ test_burst_size = ctx->options->min_burst_size;
+ else
+ test_burst_size = ctx->options->burst_size_list[0];
- tsc_start = rte_rdtsc_precise();
- while (ops_enqd_total < ctx->options->total_ops) {
+ while (test_burst_size <= ctx->options->max_burst_size) {
+ uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
+ uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
+
+ uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
uint16_t ops_unused = 0;
- uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
- <= ctx->options->total_ops) ?
- ctx->options->burst_sz :
- ctx->options->total_ops -
- ops_enqd_total;
+ tsc_start = rte_rdtsc_precise();
+
+ while (ops_enqd_total < ctx->options->total_ops) {
+
+ uint16_t burst_size = ((ops_enqd_total + test_burst_size)
+ <= ctx->options->total_ops) ?
+ test_burst_size :
+ ctx->options->total_ops -
+ ops_enqd_total;
- uint16_t ops_needed = burst_size - ops_unused;
+ uint16_t ops_needed = burst_size - ops_unused;
- /* Allocate crypto ops from pool */
- if (ops_needed != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed))
- return -1;
+ /* Allocate crypto ops from pool */
+ if (ops_needed != rte_crypto_op_bulk_alloc(
+ ctx->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops, ops_needed))
+ return -1;
- /* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
- ops_needed, ctx->sess, ctx->options,
- ctx->test_vector);
+ /* Setup crypto op, attach mbuf etc */
+ (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+ &ctx->mbufs_out[m_idx],
+ ops_needed, ctx->sess, ctx->options,
+ ctx->test_vector);
#ifdef CPERF_LINEARIZATION_ENABLE
- if (linearize) {
- /* PMD doesn't support scatter-gather and source buffer
- * is segmented.
- * We need to linearize it before enqueuing.
- */
- for (i = 0; i < burst_size; i++)
- rte_pktmbuf_linearize(ops[i]->sym->m_src);
- }
+ if (linearize) {
+ /* PMD doesn't support scatter-gather and source buffer
+ * is segmented.
+ * We need to linearize it before enqueuing.
+ */
+ for (i = 0; i < burst_size; i++)
+ rte_pktmbuf_linearize(ops[i]->sym->m_src);
+ }
#endif /* CPERF_LINEARIZATION_ENABLE */
- /* Enqueue burst of ops on crypto device */
- ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
- ops, burst_size);
- if (ops_enqd < burst_size)
- ops_enqd_failed++;
-
- /**
- * Calculate number of ops not enqueued (mainly for hw
- * accelerators whose ingress queue can fill up).
- */
- ops_unused = burst_size - ops_enqd;
- ops_enqd_total += ops_enqd;
-
-
- /* Dequeue processed burst of ops from crypto device */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->burst_sz);
-
- if (likely(ops_deqd)) {
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- for (i = 0; i < ops_deqd; i++)
- rte_crypto_op_free(ops_processed[i]);
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ ops, burst_size);
+ if (ops_enqd < burst_size)
+ ops_enqd_failed++;
- ops_deqd_total += ops_deqd;
- } else {
/**
- * Count dequeue polls which didn't return any
- * processed operations. This statistic is mainly
- * relevant to hw accelerators.
+ * Calculate number of ops not enqueued (mainly for hw
+ * accelerators whose ingress queue can fill up).
*/
- ops_deqd_failed++;
+ ops_unused = burst_size - ops_enqd;
+ ops_enqd_total += ops_enqd;
+
+
+ /* Dequeue processed burst of ops from crypto device */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
+
+ if (likely(ops_deqd)) {
+ /* free crypto ops so they can be reused. We don't free
+ * the mbufs here as we don't want to reuse them as
+ * the crypto operation will change the data and cause
+ * failures.
+ */
+ for (i = 0; i < ops_deqd; i++)
+ rte_crypto_op_free(ops_processed[i]);
+
+ ops_deqd_total += ops_deqd;
+ } else {
+ /**
+ * Count dequeue polls which didn't return any
+ * processed operations. This statistic is mainly
+ * relevant to hw accelerators.
+ */
+ ops_deqd_failed++;
+ }
+
+ m_idx += ops_needed;
+ m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
+ 0 : m_idx;
}
- m_idx += ops_needed;
- m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
- 0 : m_idx;
- }
+ /* Dequeue any operations still in the crypto device */
- /* Dequeue any operations still in the crypto device */
+ while (ops_deqd_total < ctx->options->total_ops) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
- while (ops_deqd_total < ctx->options->total_ops) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
+ if (ops_deqd == 0)
+ ops_deqd_failed++;
+ else {
+ for (i = 0; i < ops_deqd; i++)
+ rte_crypto_op_free(ops_processed[i]);
- /* dequeue burst */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->burst_sz);
- if (ops_deqd == 0)
- ops_deqd_failed++;
- else {
- for (i = 0; i < ops_deqd; i++)
- rte_crypto_op_free(ops_processed[i]);
+ ops_deqd_total += ops_deqd;
+ }
+ }
- ops_deqd_total += ops_deqd;
+ tsc_end = rte_rdtsc_precise();
+ tsc_duration = (tsc_end - tsc_start);
+
+ /* Calculate average operations processed per second */
+ double ops_per_second = ((double)ctx->options->total_ops /
+ tsc_duration) * rte_get_tsc_hz();
+
+ /* Calculate average throughput (Gbps) in bits per second */
+ double throughput_gbps = ((ops_per_second *
+ ctx->options->test_buffer_size * 8) / 1000000000);
+
+ /* Calculate average cycles per packet */
+ double cycles_per_packet = ((double)tsc_duration /
+ ctx->options->total_ops);
+
+ if (!ctx->options->csv) {
+ if (!only_once)
+ printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
+ "lcore id", "Buf Size", "Burst Size",
+ "Enqueued", "Dequeued", "Failed Enq",
+ "Failed Deq", "MOps", "Gbps",
+ "Cycles/Buf");
+ only_once = 1;
+
+ printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
+ "%12"PRIu64"%12.4f%12.4f%12.2f\n",
+ ctx->lcore_id,
+ ctx->options->test_buffer_size,
+ test_burst_size,
+ ops_enqd_total,
+ ops_deqd_total,
+ ops_enqd_failed,
+ ops_deqd_failed,
+ ops_per_second/1000000,
+ throughput_gbps,
+ cycles_per_packet);
+ } else {
+ if (!only_once)
+ printf("# lcore id, Buffer Size(B),"
+ "Burst Size,Enqueued,Dequeued,Failed Enq,"
+ "Failed Deq,Ops(Millions),Throughput(Gbps),"
+ "Cycles/Buf\n\n");
+ only_once = 1;
+
+ printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%.f3;%.f3;%.f3\n",
+ ctx->lcore_id,
+ ctx->options->test_buffer_size,
+ test_burst_size,
+ ops_enqd_total,
+ ops_deqd_total,
+ ops_enqd_failed,
+ ops_deqd_failed,
+ ops_per_second/1000000,
+ throughput_gbps,
+ cycles_per_packet);
+ }
+
+ /* Get next size from range or list */
+ if (ctx->options->inc_burst_size != 0)
+ test_burst_size += ctx->options->inc_burst_size;
+ else {
+ if (++burst_size_idx == ctx->options->burst_size_count)
+ break;
+ test_burst_size = ctx->options->burst_size_list[burst_size_idx];
}
- }
- tsc_end = rte_rdtsc_precise();
- tsc_duration = (tsc_end - tsc_start);
-
- /* Calculate average operations processed per second */
- double ops_per_second = ((double)ctx->options->total_ops /
- tsc_duration) * rte_get_tsc_hz();
-
- /* Calculate average throughput (Gbps) in bits per second */
- double throughput_gbps = ((ops_per_second *
- ctx->options->buffer_sz * 8) / 1000000000);
-
- /* Calculate average cycles per packet */
- double cycles_per_packet = ((double)tsc_duration /
- ctx->options->total_ops);
-
- if (!ctx->options->csv) {
- if (!only_once)
- printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
- "lcore id", "Buf Size", "Burst Size",
- "Enqueued", "Dequeued", "Failed Enq",
- "Failed Deq", "MOps", "Gbps",
- "Cycles/Buf");
- only_once = 1;
-
- printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
- "%12"PRIu64"%12.4f%12.4f%12.2f\n",
- ctx->lcore_id,
- ctx->options->buffer_sz,
- ctx->options->burst_sz,
- ops_enqd_total,
- ops_deqd_total,
- ops_enqd_failed,
- ops_deqd_failed,
- ops_per_second/1000000,
- throughput_gbps,
- cycles_per_packet);
- } else {
- if (!only_once)
- printf("# lcore id, Buffer Size(B),"
- "Burst Size,Enqueued,Dequeued,Failed Enq,"
- "Failed Deq,Ops(Millions),Throughput(Gbps),"
- "Cycles/Buf\n\n");
- only_once = 1;
-
- printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
- "%.f3;%.f3;%.f3\n",
- ctx->lcore_id,
- ctx->options->buffer_sz,
- ctx->options->burst_sz,
- ops_enqd_total,
- ops_deqd_total,
- ops_enqd_failed,
- ops_deqd_failed,
- ops_per_second/1000000,
- throughput_gbps,
- cycles_per_packet);
}
return 0;
if (tc_found)
vector->plaintext.length = data_length;
else {
- if (opts->buffer_sz > data_length) {
+ if (opts->max_buffer_size > data_length) {
printf("Global plaintext shorter than "
"buffer_sz\n");
return -1;
}
- vector->plaintext.length = opts->buffer_sz;
+ vector->plaintext.length = opts->max_buffer_size;
}
} else if (strstr(key_token, "cipher_key")) {
if (tc_found)
vector->ciphertext.length = data_length;
else {
- if (opts->buffer_sz > data_length) {
+ if (opts->max_buffer_size > data_length) {
printf("Global ciphertext shorter than "
"buffer_sz\n");
return -1;
}
- vector->ciphertext.length = opts->buffer_sz;
+ vector->ciphertext.length = opts->max_buffer_size;
}
} else if (strstr(key_token, "aad")) {
/* other values not included in the file */
test_vector->data.cipher_offset = 0;
- test_vector->data.cipher_length = opts->buffer_sz;
+ test_vector->data.cipher_length = opts->max_buffer_size;
test_vector->data.auth_offset = 0;
- test_vector->data.auth_length = opts->buffer_sz;
+ test_vector->data.auth_length = opts->max_buffer_size;
return test_vector;
}
return t_vec;
t_vec->plaintext.data = plaintext;
- t_vec->plaintext.length = options->buffer_sz;
+ t_vec->plaintext.length = options->max_buffer_size;
if (options->op_type == CPERF_CIPHER_ONLY ||
options->op_type == CPERF_CIPHER_THEN_AUTH ||
}
memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
}
- t_vec->ciphertext.length = options->buffer_sz;
+ t_vec->ciphertext.length = options->max_buffer_size;
t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
t_vec->iv.length = options->cipher_iv_sz;
t_vec->data.cipher_offset = 0;
- t_vec->data.cipher_length = options->buffer_sz;
+ t_vec->data.cipher_length = options->max_buffer_size;
}
if (options->op_type == CPERF_AUTH_ONLY ||
t_vec->digest.length = options->auth_digest_sz;
memcpy(t_vec->digest.data, digest, options->auth_digest_sz);
t_vec->data.auth_offset = 0;
- t_vec->data.auth_length = options->buffer_sz;
+ t_vec->data.auth_length = options->max_buffer_size;
}
return t_vec;
const struct cperf_test_vector *test_vector)
{
struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->buffer_sz / segments_nb;
- uint32_t last_sz = options->buffer_sz % segments_nb;
+ uint32_t segment_sz = options->max_buffer_size / segments_nb;
+ uint32_t last_sz = options->max_buffer_size % segments_nb;
uint8_t *mbuf_data;
uint8_t *test_data =
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
options->pool_sz * options->segments_nb, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
- (options->buffer_sz / options->segments_nb) +
- (options->buffer_sz % options->segments_nb) +
+ (options->max_buffer_size / options->segments_nb) +
+ (options->max_buffer_size % options->segments_nb) +
options->auth_digest_sz),
rte_socket_id());
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
- options->buffer_sz +
+ options->max_buffer_size +
options->auth_digest_sz),
rte_socket_id());
cipher = 1;
cipher_offset = 0;
auth = 1;
- auth_offset = vector->plaintext.length;
+ auth_offset = options->test_buffer_size;
break;
case CPERF_AUTH_ONLY:
cipher = 0;
cipher_offset = 0;
auth = 1;
- auth_offset = vector->plaintext.length;
+ auth_offset = options->test_buffer_size;
break;
case CPERF_AUTH_THEN_CIPHER:
cipher = 1;
cipher_offset = 0;
auth = 1;
- auth_offset = vector->plaintext.length;
+ auth_offset = options->test_buffer_size;
break;
case CPERF_AEAD:
cipher = 1;
cipher_offset = vector->aad.length;
auth = 1;
- auth_offset = vector->aad.length + vector->plaintext.length;
+ auth_offset = vector->aad.length + options->test_buffer_size;
break;
}
if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
res += memcmp(data + cipher_offset,
vector->ciphertext.data,
- vector->ciphertext.length);
+ options->test_buffer_size);
else
res += memcmp(data + cipher_offset,
vector->plaintext.data,
- vector->plaintext.length);
+ options->test_buffer_size);
}
if (auth == 1) {
uint64_t i, m_idx = 0;
uint16_t ops_unused = 0;
- struct rte_crypto_op *ops[ctx->options->burst_sz];
- struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+ struct rte_crypto_op *ops[ctx->options->max_burst_size];
+ struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
uint32_t lcore = rte_lcore_id();
while (ops_enqd_total < ctx->options->total_ops) {
- uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
+ uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
<= ctx->options->total_ops) ?
- ctx->options->burst_sz :
+ ctx->options->max_burst_size :
ctx->options->total_ops -
ops_enqd_total;
/* Dequeue processed burst of ops from crypto device */
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->burst_sz);
+ ops_processed, ctx->options->max_burst_size);
m_idx += ops_needed;
- if (m_idx + ctx->options->burst_sz > ctx->options->pool_sz)
+ if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
m_idx = 0;
if (ops_deqd == 0) {
/* dequeue burst */
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->burst_sz);
+ ops_processed, ctx->options->max_burst_size);
if (ops_deqd == 0) {
ops_deqd_failed++;
continue;
printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
"%12"PRIu64"%12"PRIu64"\n",
ctx->lcore_id,
- ctx->options->buffer_sz,
- ctx->options->burst_sz,
+ ctx->options->max_buffer_size,
+ ctx->options->max_burst_size,
ops_enqd_total,
ops_deqd_total,
ops_enqd_failed,
printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
"%"PRIu64"\n",
ctx->lcore_id,
- ctx->options->buffer_sz,
- ctx->options->burst_sz,
+ ctx->options->max_buffer_size,
+ ctx->options->max_burst_size,
ops_enqd_total,
ops_deqd_total,
ops_enqd_failed,
} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
if (test_vec->ciphertext.data == NULL)
return -1;
- if (test_vec->ciphertext.length != opts->buffer_sz)
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
if (test_vec->iv.data == NULL)
return -1;
if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
if (test_vec->auth_key.data == NULL)
return -1;
return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length != opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->auth_digest_sz)
return -1;
}
if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
if (test_vec->ciphertext.data == NULL)
return -1;
- if (test_vec->ciphertext.length != opts->buffer_sz)
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
if (test_vec->iv.data == NULL)
return -1;
return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length != opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->auth_digest_sz)
return -1;
}
} else if (opts->op_type == CPERF_AEAD) {
if (test_vec->plaintext.data == NULL)
return -1;
- if (test_vec->plaintext.length != opts->buffer_sz)
+ if (test_vec->plaintext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->ciphertext.data == NULL)
+ return -1;
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
if (test_vec->aad.data == NULL)
return -1;
return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length != opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->auth_digest_sz)
return -1;
}
return 0;
uint8_t cdev_id, i;
uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
+ uint8_t buffer_size_idx = 0;
+
int ret;
uint32_t lcore_id;
i++;
}
- i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ /* Get first size from range or list */
+ if (opts.inc_buffer_size != 0)
+ opts.test_buffer_size = opts.min_buffer_size;
+ else
+ opts.test_buffer_size = opts.buffer_size_list[0];
- if (i == nb_cryptodevs)
- break;
+ while (opts.test_buffer_size <= opts.max_buffer_size) {
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- cdev_id = enabled_cdevs[i];
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
- rte_eal_remote_launch(cperf_testmap[opts.test].runner,
+ rte_eal_remote_launch(cperf_testmap[opts.test].runner,
ctx[cdev_id], lcore_id);
- i++;
+ i++;
+ }
+ rte_eal_mp_wait_lcore();
+
+ /* Get next size from range or list */
+ if (opts.inc_buffer_size != 0)
+ opts.test_buffer_size += opts.inc_buffer_size;
+ else {
+ if (++buffer_size_idx == opts.buffer_size_count)
+ break;
+ opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
+ }
}
- rte_eal_mp_wait_lcore();
-
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
Set the number of packets per burst.
+ This can be set as:
+ * A single value (e.g. ``--burst-sz 16``)
+ * A range of values, using the structure ``min:inc:max``,
+ where ``min`` is the minimum size, ``inc`` is the increment size and
+ ``max`` is the maximum size (e.g. ``--burst-sz 16:2:32``)
+ * A list of up to 32 values, separated by commas (e.g. ``--burst-sz 16,24,32``)
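+
+ For example, ``--burst-sz 8:8:32`` runs the test with burst sizes
+ 8, 16, 24 and 32.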
+
* ``--buffer-sz <n>``
Set the size of single packet (plaintext or ciphertext in it).
+ This can be set as:
+ * A single value (e.g. ``--buffer-sz 16``)
+ * A range of values, using the structure ``min:inc:max``,
+ where ``min`` is the minimum size, ``inc`` is the increment size and
+ ``max`` is the maximum size (e.g. ``--buffer-sz 16:2:32``)
+ * A list of up to 32 values, separated by commas (e.g. ``--buffer-sz 32,64,128``)
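+
+ For example, ``--buffer-sz 64:64:256`` runs the test with buffer sizes
+ 64, 128, 192 and 256.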
+
* ``--segments-nb <n>``
Set the number of segments per packet.