static int
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
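+ /*
+ * Reset the op status: ops are now recycled through a plain
+ * mempool, so an op may still carry the status from its
+ * previous run.
+ */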
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
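+ /*
+ * The mbufs live in the same mempool object as the crypto op,
+ * at fixed offsets from the op address, so they are derived by
+ * pointer arithmetic instead of being passed in as arrays.
+ */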
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
sym_op->cipher.data.length = options->test_buffer_size;
static int
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* auth parameters */
sym_op->auth.data.length = options->test_buffer_size;
static int
cperf_set_ops_cipher(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
if (test_vector->auth_iv.length) {
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* AEAD parameters */
sym_op->aead.data.length = options->test_buffer_size;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
uint16_t iv_offset);
typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
#include "cperf_test_common.h"
-static struct rte_mbuf *
-cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segment_sz,
- uint32_t segments_nb,
- const struct cperf_options *options)
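+/*
+ * Offsets of the source/destination mbufs inside each mempool object,
+ * plus the segment geometry used to lay out the source chain.
+ */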
+struct obj_params {
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+ uint16_t segment_sz;
+ uint16_t segments_nb;
+};
+
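+/*
+ * Set up a standalone mbuf whose header and data area both live
+ * inside the mempool object, mbuf_offset bytes from its start.
+ */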
+static void
+fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
+ void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
{
- struct rte_mbuf *mbuf;
- uint8_t *mbuf_data;
- uint32_t remaining_bytes = options->max_buffer_size;
-
- mbuf = rte_pktmbuf_alloc(mempool);
- if (mbuf == NULL)
- goto error;
+ uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
+
+ /* start of buffer is after mbuf structure and priv data */
+ m->priv_size = 0;
+ m->buf_addr = (char *)m + mbuf_hdr_size;
+ m->buf_physaddr = rte_mempool_virt2phy(mp, obj) +
+ mbuf_offset + mbuf_hdr_size;
+ m->buf_len = segment_sz;
+ m->data_len = segment_sz;
+
+ /* No headroom needed for the buffer */
+ m->data_off = 0;
+
+ /* init some constant fields */
+ m->pool = mp;
+ m->nb_segs = 1;
+ m->port = 0xff;
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
+}
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
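+/*
+ * Set up a chain of segments_nb mbufs placed back to back inside the
+ * mempool object; each hop advances by (header + segment_sz) bytes.
+ */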
+static void
+fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
+ void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
+ uint16_t segments_nb)
+{
+ uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
+ uint16_t remaining_segments = segments_nb;
+ struct rte_mbuf *next_mbuf;
+ phys_addr_t next_seg_phys_addr = rte_mempool_virt2phy(mp, obj) +
+ mbuf_offset + mbuf_hdr_size;
+
+ do {
+ /* start of buffer is after mbuf structure and priv data */
+ m->priv_size = 0;
+ m->buf_addr = (char *)m + mbuf_hdr_size;
+ m->buf_physaddr = next_seg_phys_addr;
+ next_seg_phys_addr += mbuf_hdr_size + segment_sz;
+ m->buf_len = segment_sz;
+ m->data_len = segment_sz;
+
+ /* No headroom needed for the buffer */
+ m->data_off = 0;
+
+ /* init some constant fields */
+ m->pool = mp;
+ m->nb_segs = segments_nb;
+ m->port = 0xff;
+ rte_mbuf_refcnt_set(m, 1);
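+ /* Link to the next segment, or terminate the chain on the last one */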
+ next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
+ mbuf_hdr_size + segment_sz);
+ remaining_segments--;
+ if (remaining_segments > 0) {
+ m->next = next_mbuf;
+ m = next_mbuf;
+ } else {
+ m->next = NULL;
+ }
+
+ } while (remaining_segments > 0);
+}
- if (options->max_buffer_size <= segment_sz)
- remaining_bytes = 0;
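+/*
+ * Per-object constructor, run once when the pool is populated:
+ * builds the crypto op plus its source (and, for out-of-place,
+ * destination) mbufs inside the object.
+ */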
+static void
+mempool_obj_init(struct rte_mempool *mp,
+ void *opaque_arg,
+ void *obj,
+ __attribute__((unused)) unsigned int i)
+{
+ struct obj_params *params = opaque_arg;
+ struct rte_crypto_op *op = obj;
+ struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
+ params->src_buf_offset);
+ /* Set crypto operation */
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+ /* Set source buffer */
+ op->sym->m_src = m;
+ if (params->segments_nb == 1)
+ fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
+ params->segment_sz);
else
- remaining_bytes -= segment_sz;
-
- segments_nb--;
-
- while (remaining_bytes) {
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mempool);
- if (m == NULL)
- goto error;
-
- rte_pktmbuf_chain(mbuf, m);
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- if (remaining_bytes <= segment_sz)
- remaining_bytes = 0;
- else
- remaining_bytes -= segment_sz;
-
- segments_nb--;
- }
-
- /*
- * If there was not enough room for the digest at the end
- * of the last segment, allocate a new one
- */
- if (segments_nb != 0) {
- struct rte_mbuf *m;
- m = rte_pktmbuf_alloc(mempool);
-
- if (m == NULL)
- goto error;
-
- rte_pktmbuf_chain(mbuf, m);
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
- }
-
- return mbuf;
-error:
- if (mbuf != NULL)
- rte_pktmbuf_free(mbuf);
-
- return NULL;
+ fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
+ params->segment_sz, params->segments_nb);
+
+ /* Set destination buffer */
+ if (params->dst_buf_offset) {
+ m = (struct rte_mbuf *) ((uint8_t *) obj +
+ params->dst_buf_offset);
+ fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
+ params->segment_sz);
+ op->sym->m_dst = m;
+ } else {
+ op->sym->m_dst = NULL;
+ }
}
int
const struct cperf_test_vector *test_vector,
uint8_t dev_id, uint16_t qp_id,
size_t extra_op_priv_size,
- struct rte_mempool **pkt_mbuf_pool_in,
- struct rte_mempool **pkt_mbuf_pool_out,
- struct rte_mbuf ***mbufs_in,
- struct rte_mbuf ***mbufs_out,
- struct rte_mempool **crypto_op_pool)
+ uint32_t *src_buf_offset,
+ uint32_t *dst_buf_offset,
+ struct rte_mempool **pool)
{
- unsigned int mbuf_idx = 0;
char pool_name[32] = "";
-
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%u_qp_%u",
- dev_id, qp_id);
-
+ int ret;
+
+ /* Calculate the object size */
+ uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+ uint16_t crypto_op_private_size = extra_op_priv_size +
+ test_vector->cipher_iv.length +
+ test_vector->auth_iv.length +
+ test_vector->aead_iv.length +
+ options->aead_aad_sz;
+ uint16_t crypto_op_total_size = crypto_op_size +
+ crypto_op_private_size;
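+ /* Round up so the mbufs that follow start on a cache line */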
+ uint16_t crypto_op_total_size_padded =
+ RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
+ uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
uint32_t max_size = options->max_buffer_size + options->digest_sz;
uint16_t segments_nb = (max_size % options->segment_sz) ?
(max_size / options->segment_sz) + 1 :
max_size / options->segment_sz;
+ uint32_t obj_size = crypto_op_total_size_padded +
+ (mbuf_size * segments_nb);
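+ /*
+ * Each object is laid out as:
+ * [0] crypto op + sym op + private data (padded)
+ * [src_buf_offset] segments_nb x (mbuf header + segment_sz)
+ * [dst_buf_offset] mbuf header + max_size (out-of-place only)
+ */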
- *pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM + options->segment_sz,
- rte_socket_id());
-
- if (*pkt_mbuf_pool_in == NULL)
- return -1;
+ snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
+ dev_id, qp_id);
- /* Generate mbufs_in with plaintext populated for test */
- *mbufs_in = (struct rte_mbuf **)rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) * options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- (*mbufs_in)[mbuf_idx] = cperf_mbuf_create(
- *pkt_mbuf_pool_in,
- options->segment_sz,
- segments_nb,
- options);
- if ((*mbufs_in)[mbuf_idx] == NULL)
- return -1;
+ *src_buf_offset = crypto_op_total_size_padded;
+
+ struct obj_params params = {
+ .segment_sz = options->segment_sz,
+ .segments_nb = segments_nb,
+ .src_buf_offset = crypto_op_total_size_padded,
+ .dst_buf_offset = 0
+ };
+
+ if (options->out_of_place) {
+ *dst_buf_offset = *src_buf_offset +
+ (mbuf_size * segments_nb);
+ params.dst_buf_offset = *dst_buf_offset;
+ /* Destination buffer is a single segment: mbuf header + data */
+ obj_size += sizeof(struct rte_mbuf) + max_size;
}
- *mbufs_out = (struct rte_mbuf **)rte_zmalloc(NULL,
- (sizeof(struct rte_mbuf *) *
- options->pool_sz), 0);
-
- if (options->out_of_place == 1) {
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%u_qp_%u",
- dev_id, qp_id);
-
- *pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
- pool_name, options->pool_sz, 0, 0,
- RTE_PKTMBUF_HEADROOM + max_size,
- rte_socket_id());
-
- if (*pkt_mbuf_pool_out == NULL)
- return -1;
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- (*mbufs_out)[mbuf_idx] = cperf_mbuf_create(
- *pkt_mbuf_pool_out, max_size,
- 1, options);
- if ((*mbufs_out)[mbuf_idx] == NULL)
- return -1;
- }
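+ /*
+ * Create the pool in stages so the default mbuf mempool handler
+ * can be selected explicitly before the memory is populated.
+ */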
+ *pool = rte_mempool_create_empty(pool_name,
+ options->pool_sz, obj_size, 512, 0,
+ rte_socket_id(), 0);
+ if (*pool == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Cannot allocate mempool for device %u\n",
+ dev_id);
+ return -1;
}
- snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%u_qp_%u",
- dev_id, qp_id);
-
- uint16_t priv_size = RTE_ALIGN_CEIL(test_vector->cipher_iv.length +
- test_vector->auth_iv.length + test_vector->aead_iv.length +
- extra_op_priv_size, 16) +
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16);
-
- *crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
- 512, priv_size, rte_socket_id());
- if (*crypto_op_pool == NULL)
+ ret = rte_mempool_set_ops_byname(*pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+ if (ret != 0) {
+ RTE_LOG(ERR, USER1,
+ "Error setting mempool handler for device %u\n",
+ dev_id);
return -1;
-
- return 0;
-}
-
-void
-cperf_free_common_memory(const struct cperf_options *options,
- struct rte_mempool *pkt_mbuf_pool_in,
- struct rte_mempool *pkt_mbuf_pool_out,
- struct rte_mbuf **mbufs_in,
- struct rte_mbuf **mbufs_out,
- struct rte_mempool *crypto_op_pool)
-{
- uint32_t i = 0;
-
- if (mbufs_in) {
- while (mbufs_in[i] != NULL &&
- i < options->pool_sz)
- rte_pktmbuf_free(mbufs_in[i++]);
-
- rte_free(mbufs_in);
}
- if (mbufs_out) {
- i = 0;
- while (mbufs_out[i] != NULL
- && i < options->pool_sz)
- rte_pktmbuf_free(mbufs_out[i++]);
-
- rte_free(mbufs_out);
+ ret = rte_mempool_populate_default(*pool);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Error populating mempool for device %u\n",
+ dev_id);
+ return -1;
}
- if (pkt_mbuf_pool_in)
- rte_mempool_free(pkt_mbuf_pool_in);
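+
+ /*
+ * Build ops and mbuf headers once for every object, so the data
+ * path only has to patch per-operation fields.
+ */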
+ rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);
- if (pkt_mbuf_pool_out)
- rte_mempool_free(pkt_mbuf_pool_out);
-
- if (crypto_op_pool)
- rte_mempool_free(crypto_op_pool);
+ return 0;
}
const struct cperf_test_vector *test_vector,
uint8_t dev_id, uint16_t qp_id,
size_t extra_op_priv_size,
- struct rte_mempool **pkt_mbuf_pool_in,
- struct rte_mempool **pkt_mbuf_pool_out,
- struct rte_mbuf ***mbufs_in,
- struct rte_mbuf ***mbufs_out,
- struct rte_mempool **crypto_op_pool);
-
-void
-cperf_free_common_memory(const struct cperf_options *options,
- struct rte_mempool *pkt_mbuf_pool_in,
- struct rte_mempool *pkt_mbuf_pool_out,
- struct rte_mbuf **mbufs_in,
- struct rte_mbuf **mbufs_out,
- struct rte_mempool *crypto_op_pool);
+ uint32_t *src_buf_offset,
+ uint32_t *dst_buf_offset,
+ struct rte_mempool **pool);
#endif /* _CPERF_TEST_COMMON_H_ */
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
struct cperf_op_result *res;
rte_cryptodev_sym_session_free(ctx->sess);
}
- cperf_free_common_memory(ctx->options,
- ctx->pkt_mbuf_pool_in,
- ctx->pkt_mbuf_pool_out,
- ctx->mbufs_in, ctx->mbufs_out,
- ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx->res);
rte_free(ctx);
if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id,
extra_op_priv_size,
- &ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
- &ctx->mbufs_in, &ctx->mbufs_out,
- &ctx->crypto_op_pool) < 0)
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_deqd = 0;
- uint64_t m_idx = 0, b_idx = 0;
+ uint64_t b_idx = 0;
uint64_t tsc_val, tsc_end, tsc_start;
uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
ctx->options->total_ops -
enqd_tot;
- /* Allocate crypto ops from pool */
- if (burst_size != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ burst_size) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
burst_size, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
/* Free memory for not enqueued operations */
if (ops_enqd != burst_size)
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)&ops[ops_enqd],
burst_size - ops_enqd);
}
if (likely(ops_deqd)) {
- /*
- * free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
+ /* Free crypto ops so they can be reused. */
for (i = 0; i < ops_deqd; i++)
store_timestamp(ops_processed[i], tsc_end);
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
enqd_max = max(ops_enqd, enqd_max);
enqd_min = min(ops_enqd, enqd_min);
- m_idx += ops_enqd;
- m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
- 0 : m_idx;
b_idx++;
}
for (i = 0; i < ops_deqd; i++)
store_timestamp(ops_processed[i], tsc_end);
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_crypto_op **ops;
struct rte_crypto_op **ops_processed;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
rte_cryptodev_sym_session_free(ctx->sess);
}
- cperf_free_common_memory(ctx->options,
- ctx->pkt_mbuf_pool_in,
- ctx->pkt_mbuf_pool_out,
- ctx->mbufs_in, ctx->mbufs_out,
- ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
+
if (ctx->ops)
rte_free(ctx->ops);
goto err;
if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
- &ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
- &ctx->mbufs_in, &ctx->mbufs_out,
- &ctx->crypto_op_pool) < 0)
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
ctx->ops = rte_malloc("ops", alloc_sz, 0);
test_burst_size);
struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
- if (burst_size != rte_crypto_op_bulk_alloc(
- state->ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size))
- return -1;
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
+ burst_size) != 0) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
+ return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(state->ctx->populate_ops)(ops,
- &state->ctx->mbufs_in[cur_iter_op],
- &state->ctx->mbufs_out[cur_iter_op], burst_size,
+ state->ctx->src_buf_offset,
+ state->ctx->dst_buf_offset,
+ burst_size,
state->ctx->sess, state->opts,
state->ctx->test_vector, iv_offset);
}
}
#endif /* CPERF_LINEARIZATION_ENABLE */
- rte_mempool_put_bulk(state->ctx->crypto_op_pool, (void **)ops,
+ rte_mempool_put_bulk(state->ctx->pool, (void **)ops,
burst_size);
}
iter_ops_needed - cur_iter_op, test_burst_size);
struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
- if (burst_size != rte_crypto_op_bulk_alloc(
- state->ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size))
- return -1;
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
+ burst_size) != 0) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
+ return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(state->ctx->populate_ops)(ops,
- &state->ctx->mbufs_in[cur_iter_op],
- &state->ctx->mbufs_out[cur_iter_op], burst_size,
+ state->ctx->src_buf_offset,
+ state->ctx->dst_buf_offset,
+ burst_size,
state->ctx->sess, state->opts,
state->ctx->test_vector, iv_offset);
}
* we may not have processed all ops that we allocated, so
* free everything we've allocated.
*/
- rte_mempool_put_bulk(state->ctx->crypto_op_pool,
+ rte_mempool_put_bulk(state->ctx->pool,
(void **)state->ctx->ops, iter_ops_allocd);
}
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
rte_cryptodev_sym_session_free(ctx->sess);
}
- cperf_free_common_memory(ctx->options,
- ctx->pkt_mbuf_pool_in,
- ctx->pkt_mbuf_pool_out,
- ctx->mbufs_in, ctx->mbufs_out,
- ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx);
}
goto err;
if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
- &ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
- &ctx->mbufs_in, &ctx->mbufs_out,
- &ctx->crypto_op_pool) < 0)
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
return ctx;
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
- uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
+ uint64_t tsc_start, tsc_end, tsc_duration;
uint16_t ops_unused = 0;
uint16_t ops_needed = burst_size - ops_unused;
- /* Allocate crypto ops from pool */
- if (ops_needed != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ ops_needed) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
- ops_needed, ctx->sess, ctx->options,
- ctx->test_vector, iv_offset);
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
+ ops_needed, ctx->sess,
+ ctx->options, ctx->test_vector,
+ iv_offset);
/**
* When ops_needed is smaller than ops_enqd, the
ops_processed, test_burst_size);
if (likely(ops_deqd)) {
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
ops_deqd_failed++;
}
- m_idx += ops_needed;
- m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
- 0 : m_idx;
}
/* Dequeue any operations still in the crypto device */
if (ops_deqd == 0)
ops_deqd_failed++;
else {
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
-
ops_deqd_total += ops_deqd;
}
}
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
rte_cryptodev_sym_session_free(ctx->sess);
}
- cperf_free_common_memory(ctx->options,
- ctx->pkt_mbuf_pool_in,
- ctx->pkt_mbuf_pool_out,
- ctx->mbufs_in, ctx->mbufs_out,
- ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx);
}
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the cryptop operation */
+ /* IV goes at the end of the crypto operation */
uint16_t iv_offset = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op);
goto err;
if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
- &ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
- &ctx->mbufs_in, &ctx->mbufs_out,
- &ctx->crypto_op_pool) < 0)
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
return ctx;
static int only_once;
- uint64_t i, m_idx = 0;
+ uint64_t i;
uint16_t ops_unused = 0;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
uint16_t ops_needed = burst_size - ops_unused;
- /* Allocate crypto ops from pool */
- if (ops_needed != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ ops_needed) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
ops_needed, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
ops_processed, ctx->options->max_burst_size);
- m_idx += ops_needed;
- if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
- m_idx = 0;
-
if (ops_deqd == 0) {
/**
* Count dequeue polls which didn't return any
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
ops_failed++;
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_crypto_op_free(ops_processed[i]);
}
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
ops_failed++;
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_crypto_op_free(ops_processed[i]);
}
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}