#include "cperf_test_verify.h"
#include "cperf_ops.h"
-struct cperf_verify_results {
- uint64_t ops_enqueued;
- uint64_t ops_dequeued;
-
- uint64_t ops_enqueued_failed;
- uint64_t ops_dequeued_failed;
-
- uint64_t ops_failed;
-};
-
struct cperf_verify_ctx {
uint8_t dev_id;
uint16_t qp_id;
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
- struct cperf_verify_results results;
-
};
struct cperf_op_result {
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
const struct cperf_test_vector *test_vector)
{
struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->buffer_sz / segments_nb;
- uint32_t last_sz = options->buffer_sz % segments_nb;
+ uint32_t segment_sz = options->max_buffer_size / segments_nb;
+ uint32_t last_sz = options->max_buffer_size % segments_nb;
uint8_t *mbuf_data;
uint8_t *test_data =
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
}
void *
-cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_verify_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+	/* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
options->pool_sz * options->segments_nb, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
- (options->buffer_sz / options->segments_nb) +
- (options->buffer_sz % options->segments_nb) +
- options->auth_digest_sz),
+ (options->max_buffer_size / options->segments_nb) +
+ (options->max_buffer_size % options->segments_nb) +
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
goto err;
/* Generate mbufs_in with plaintext populated for test */
- if (ctx->options->pool_sz % ctx->options->burst_sz)
- goto err;
-
ctx->mbufs_in = rte_malloc(NULL,
(sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
- options->buffer_sz +
- options->auth_digest_sz),
+ options->max_buffer_size +
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = test_vector->cipher_iv.length +
+ test_vector->auth_iv.length;
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
if (ctx->crypto_op_pool == NULL)
goto err;
cipher = 1;
cipher_offset = 0;
auth = 1;
- auth_offset = vector->plaintext.length;
+ auth_offset = options->test_buffer_size;
break;
case CPERF_AUTH_ONLY:
cipher = 0;
cipher_offset = 0;
auth = 1;
- auth_offset = vector->plaintext.length;
+ auth_offset = options->test_buffer_size;
break;
case CPERF_AUTH_THEN_CIPHER:
cipher = 1;
cipher_offset = 0;
auth = 1;
- auth_offset = vector->plaintext.length;
+ auth_offset = options->test_buffer_size;
break;
case CPERF_AEAD:
cipher = 1;
cipher_offset = vector->aad.length;
auth = 1;
- auth_offset = vector->aad.length + vector->plaintext.length;
+ auth_offset = vector->aad.length + options->test_buffer_size;
break;
}
if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
res += memcmp(data + cipher_offset,
vector->ciphertext.data,
- vector->ciphertext.length);
+ options->test_buffer_size);
else
res += memcmp(data + cipher_offset,
vector->plaintext.data,
- vector->plaintext.length);
+ options->test_buffer_size);
}
if (auth == 1) {
if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
res += memcmp(data + auth_offset,
vector->digest.data,
- vector->digest.length);
+ options->digest_sz);
}
return !!res;
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
+ uint64_t ops_failed = 0;
+
+ static int only_once;
uint64_t i, m_idx = 0;
uint16_t ops_unused = 0;
- struct rte_crypto_op *ops[ctx->options->burst_sz];
- struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+ struct rte_crypto_op *ops[ctx->options->max_burst_size];
+ struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
uint32_t lcore = rte_lcore_id();
printf("\n# Running verify test on device: %u, lcore: %u\n",
ctx->dev_id, lcore);
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
while (ops_enqd_total < ctx->options->total_ops) {
- uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
+ uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
<= ctx->options->total_ops) ?
- ctx->options->burst_sz :
+ ctx->options->max_burst_size :
ctx->options->total_ops -
ops_enqd_total;
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed))
+ ops, ops_needed)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
"from the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
#ifdef CPERF_LINEARIZATION_ENABLE
if (linearize) {
/* Dequeue processed burst of ops from crypto device */
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->burst_sz);
+ ops_processed, ctx->options->max_burst_size);
m_idx += ops_needed;
- if (m_idx + ctx->options->burst_sz > ctx->options->pool_sz)
+ if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
m_idx = 0;
if (ops_deqd == 0) {
for (i = 0; i < ops_deqd; i++) {
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
- ctx->results.ops_failed++;
+ ops_failed++;
/* free crypto ops so they can be reused. We don't free
* the mbufs here as we don't want to reuse them as
* the crypto operation will change the data and cause
* failures.
*/
rte_crypto_op_free(ops_processed[i]);
- ops_deqd_total += ops_deqd;
}
+ ops_deqd_total += ops_deqd;
}
/* Dequeue any operations still in the crypto device */
/* dequeue burst */
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->burst_sz);
+ ops_processed, ctx->options->max_burst_size);
if (ops_deqd == 0) {
ops_deqd_failed++;
continue;
for (i = 0; i < ops_deqd; i++) {
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
- ctx->results.ops_failed++;
+ ops_failed++;
/* free crypto ops so they can be reused. We don't free
* the mbufs here as we don't want to reuse them as
* the crypto operation will change the data and cause
* failures.
*/
rte_crypto_op_free(ops_processed[i]);
- ops_deqd_total += ops_deqd;
}
+ ops_deqd_total += ops_deqd;
}
- ctx->results.ops_enqueued = ops_enqd_total;
- ctx->results.ops_dequeued = ops_deqd_total;
+ if (!ctx->options->csv) {
+ if (!only_once)
+ printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
+ "lcore id", "Buf Size", "Burst size",
+ "Enqueued", "Dequeued", "Failed Enq",
+ "Failed Deq", "Failed Ops");
+ only_once = 1;
+
+ printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
+ "%12"PRIu64"%12"PRIu64"\n",
+ ctx->lcore_id,
+ ctx->options->max_buffer_size,
+ ctx->options->max_burst_size,
+ ops_enqd_total,
+ ops_deqd_total,
+ ops_enqd_failed,
+ ops_deqd_failed,
+ ops_failed);
+ } else {
+ if (!only_once)
+ printf("\n# lcore id, Buffer Size(B), "
+ "Burst Size,Enqueued,Dequeued,Failed Enq,"
+ "Failed Deq,Failed Ops\n");
+ only_once = 1;
- ctx->results.ops_enqueued_failed = ops_enqd_failed;
- ctx->results.ops_dequeued_failed = ops_deqd_failed;
+ printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%"PRIu64"\n",
+ ctx->lcore_id,
+ ctx->options->max_buffer_size,
+ ctx->options->max_burst_size,
+ ops_enqd_total,
+ ops_deqd_total,
+ ops_enqd_failed,
+ ops_deqd_failed,
+ ops_failed);
+ }
return 0;
}
cperf_verify_test_destructor(void *arg)
{
struct cperf_verify_ctx *ctx = arg;
- struct cperf_verify_results *results = &ctx->results;
- static int only_once;
if (ctx == NULL)
return;
- if (!ctx->options->csv) {
- printf("\n# Device %d on lcore %u\n",
- ctx->dev_id, ctx->lcore_id);
- printf("# Buffer Size(B)\t Enqueued\t Dequeued\tFailed Enq"
- "\tFailed Deq\tEmpty Polls\n");
-
- printf("\n%16u\t%10"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
- "%10"PRIu64"\t%10"PRIu64"\n",
- ctx->options->buffer_sz,
- results->ops_enqueued,
- results->ops_dequeued,
- results->ops_enqueued_failed,
- results->ops_dequeued_failed,
- results->ops_failed);
- } else {
- if (!only_once)
- printf("\n# CPU lcore id, Burst Size(B), "
- "Buffer Size(B),Enqueued,Dequeued,Failed Enq,"
- "Failed Deq,Empty Polls\n");
- only_once = 1;
-
- printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
- "%"PRIu64"\n",
- ctx->lcore_id,
- ctx->options->burst_sz,
- ctx->options->buffer_sz,
- results->ops_enqueued,
- results->ops_dequeued,
- results->ops_enqueued_failed,
- results->ops_dequeued_failed,
- results->ops_failed);
- }
+ rte_cryptodev_stop(ctx->dev_id);
cperf_verify_test_free(ctx, ctx->options->pool_sz);
}