cryptodev: move IV parameters to session
[dpdk.git] / app/test-crypto-perf/cperf_test_throughput.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_throughput.h"
#include "cperf_ops.h"

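/*
 * Per-lcore throughput test context: target device/queue pair, the
 * pre-built source (and optional destination) mbufs, the crypto op
 * mempool, the symmetric session and the op population callback.
 */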
struct cperf_throughput_ctx {
        uint8_t dev_id;
        uint16_t qp_id;
        uint8_t lcore_id;

        struct rte_mempool *pkt_mbuf_pool_in;
        struct rte_mempool *pkt_mbuf_pool_out;
        struct rte_mbuf **mbufs_in;
        struct rte_mbuf **mbufs_out;

        struct rte_mempool *crypto_op_pool;

        struct rte_cryptodev_sym_session *sess;

        cperf_populate_ops_t populate_ops;

        const struct cperf_options *options;
        const struct cperf_test_vector *test_vector;
};

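/*
 * Release everything the constructor allocated: the session, the mbufs,
 * the mempools and the context structure itself.
 */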
static void
cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
{
        uint32_t i;

        if (ctx) {
                if (ctx->sess)
                        rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);

                if (ctx->mbufs_in) {
                        for (i = 0; i < mbuf_nb; i++)
                                rte_pktmbuf_free(ctx->mbufs_in[i]);

                        rte_free(ctx->mbufs_in);
                }

                if (ctx->mbufs_out) {
                        for (i = 0; i < mbuf_nb; i++) {
                                if (ctx->mbufs_out[i] != NULL)
                                        rte_pktmbuf_free(ctx->mbufs_out[i]);
                        }

                        rte_free(ctx->mbufs_out);
                }

                if (ctx->pkt_mbuf_pool_in)
                        rte_mempool_free(ctx->pkt_mbuf_pool_in);

                if (ctx->pkt_mbuf_pool_out)
                        rte_mempool_free(ctx->pkt_mbuf_pool_out);

                if (ctx->crypto_op_pool)
                        rte_mempool_free(ctx->crypto_op_pool);

                rte_free(ctx);
        }
}

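/*
 * Build a source mbuf of segments_nb segments, filled with the test vector
 * plaintext (for encryption) or ciphertext (for decryption), with extra room
 * appended for the digest and, for AEAD, prepended for the AAD.
 */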
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
                uint32_t segments_nb,
                const struct cperf_options *options,
                const struct cperf_test_vector *test_vector)
{
        struct rte_mbuf *mbuf;
        uint32_t segment_sz = options->max_buffer_size / segments_nb;
        uint32_t last_sz = options->max_buffer_size % segments_nb;
        uint8_t *mbuf_data;
        uint8_t *test_data =
                        (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                                        test_vector->plaintext.data :
                                        test_vector->ciphertext.data;

        mbuf = rte_pktmbuf_alloc(mempool);
        if (mbuf == NULL)
                goto error;

        mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
        if (mbuf_data == NULL)
                goto error;

        memcpy(mbuf_data, test_data, segment_sz);
        test_data += segment_sz;
        segments_nb--;

        while (segments_nb) {
                struct rte_mbuf *m;

                m = rte_pktmbuf_alloc(mempool);
                if (m == NULL)
                        goto error;

                rte_pktmbuf_chain(mbuf, m);

                mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
                if (mbuf_data == NULL)
                        goto error;

                memcpy(mbuf_data, test_data, segment_sz);
                test_data += segment_sz;
                segments_nb--;
        }

        if (last_sz) {
                mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
                if (mbuf_data == NULL)
                        goto error;

                memcpy(mbuf_data, test_data, last_sz);
        }

        if (options->op_type != CPERF_CIPHER_ONLY) {
                mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
                                options->auth_digest_sz);
                if (mbuf_data == NULL)
                        goto error;
        }

        if (options->op_type == CPERF_AEAD) {
                uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
                        RTE_ALIGN_CEIL(options->auth_aad_sz, 16));

                if (aead == NULL)
                        goto error;

                memcpy(aead, test_vector->aad.data, test_vector->aad.length);
        }

        return mbuf;
error:
        if (mbuf != NULL)
                rte_pktmbuf_free(mbuf);

        return NULL;
}

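/*
 * Test constructor: creates the session, the pools of pre-populated input
 * (and, for out-of-place operation, output) mbufs and the crypto op pool,
 * whose per-op private data area is sized to hold the IV.
 */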
void *
cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
                const struct cperf_options *options,
                const struct cperf_test_vector *test_vector,
                const struct cperf_op_fns *op_fns)
{
        struct cperf_throughput_ctx *ctx = NULL;
        unsigned int mbuf_idx = 0;
        char pool_name[32] = "";

        ctx = rte_malloc(NULL, sizeof(struct cperf_throughput_ctx), 0);
        if (ctx == NULL)
                goto err;

        ctx->dev_id = dev_id;
        ctx->qp_id = qp_id;

        ctx->populate_ops = op_fns->populate_ops;
        ctx->options = options;
        ctx->test_vector = test_vector;

        /* IV goes at the end of the crypto operation */
        uint16_t iv_offset = sizeof(struct rte_crypto_op) +
                sizeof(struct rte_crypto_sym_op);

        ctx->sess = op_fns->sess_create(dev_id, options, test_vector, iv_offset);
        if (ctx->sess == NULL)
                goto err;

        snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
                        dev_id);

        ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
                        options->pool_sz * options->segments_nb, 0, 0,
                        RTE_PKTMBUF_HEADROOM +
                        RTE_CACHE_LINE_ROUNDUP(
                                (options->max_buffer_size / options->segments_nb) +
                                (options->max_buffer_size % options->segments_nb) +
                                        options->auth_digest_sz),
                        rte_socket_id());

        if (ctx->pkt_mbuf_pool_in == NULL)
                goto err;

        /* Generate mbufs_in with plaintext populated for test */
        ctx->mbufs_in = rte_malloc(NULL,
                        (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);

        for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
                ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
                                ctx->pkt_mbuf_pool_in, options->segments_nb,
                                options, test_vector);
                if (ctx->mbufs_in[mbuf_idx] == NULL)
                        goto err;
        }

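        /*
         * For out-of-place operation, destination mbufs come from a separate
         * pool and are always single-segment.
         */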
        if (options->out_of_place == 1) {

                snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
                                dev_id);

                ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
                                pool_name, options->pool_sz, 0, 0,
                                RTE_PKTMBUF_HEADROOM +
                                RTE_CACHE_LINE_ROUNDUP(
                                        options->max_buffer_size +
                                        options->auth_digest_sz),
                                rte_socket_id());

                if (ctx->pkt_mbuf_pool_out == NULL)
                        goto err;
        }

        ctx->mbufs_out = rte_malloc(NULL,
                        (sizeof(struct rte_mbuf *) *
                        ctx->options->pool_sz), 0);

        for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
                if (options->out_of_place == 1) {
                        ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
                                        ctx->pkt_mbuf_pool_out, 1,
                                        options, test_vector);
                        if (ctx->mbufs_out[mbuf_idx] == NULL)
                                goto err;
                } else {
                        ctx->mbufs_out[mbuf_idx] = NULL;
                }
        }

        snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
                        dev_id);

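        /*
         * Reserve private data space in each crypto op for the per-op IV;
         * populate_ops writes the IV at iv_offset, right after the symmetric
         * op structure.
         */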
        uint16_t priv_size = test_vector->iv.length;

        ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
                        RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
                        512, priv_size, rte_socket_id());
        if (ctx->crypto_op_pool == NULL)
                goto err;

        return ctx;
err:
        cperf_throughput_test_free(ctx, mbuf_idx);

        return NULL;
}

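/*
 * Test runner: for each burst size in the configured range or list, enqueue
 * total_ops operations, drain the device and report the throughput figures.
 */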
int
cperf_throughput_test_runner(void *test_ctx)
{
        struct cperf_throughput_ctx *ctx = test_ctx;
        uint16_t test_burst_size;
        uint8_t burst_size_idx = 0;

        static int only_once;

        struct rte_crypto_op *ops[ctx->options->max_burst_size];
        struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
        uint64_t i;

        uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
        struct rte_cryptodev_info dev_info;
        int linearize = 0;

        /* Check if source mbufs require coalescing */
        if (ctx->options->segments_nb > 1) {
                rte_cryptodev_info_get(ctx->dev_id, &dev_info);
                if ((dev_info.feature_flags &
                                RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
                        linearize = 1;
        }
#endif /* CPERF_LINEARIZATION_ENABLE */

        ctx->lcore_id = lcore;

        /* Warm up the host CPU before starting the test */
        for (i = 0; i < ctx->options->total_ops; i++)
                rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

        /* Get first size from range or list */
        if (ctx->options->inc_burst_size != 0)
                test_burst_size = ctx->options->min_burst_size;
        else
                test_burst_size = ctx->options->burst_size_list[0];

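        /* IV goes at the end of the crypto operation */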
        uint16_t iv_offset = sizeof(struct rte_crypto_op) +
                sizeof(struct rte_crypto_sym_op);

        while (test_burst_size <= ctx->options->max_burst_size) {
                uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
                uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;

                uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;

                uint16_t ops_unused = 0;

                tsc_start = rte_rdtsc_precise();

                while (ops_enqd_total < ctx->options->total_ops) {

                        uint16_t burst_size = ((ops_enqd_total + test_burst_size)
                                        <= ctx->options->total_ops) ?
                                                        test_burst_size :
                                                        ctx->options->total_ops -
                                                        ops_enqd_total;

                        uint16_t ops_needed = burst_size - ops_unused;

                        /* Allocate crypto ops from pool */
                        if (ops_needed != rte_crypto_op_bulk_alloc(
                                        ctx->crypto_op_pool,
                                        RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                                        ops, ops_needed))
                                return -1;

                        /* Setup crypto op, attach mbuf etc */
                        (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
                                        &ctx->mbufs_out[m_idx],
                                        ops_needed, ctx->sess, ctx->options,
                                        ctx->test_vector, iv_offset);

                        /**
                         * When ops_needed is smaller than ops_enqd, the
                         * unused ops need to be moved to the front for
                         * next round use.
                         */
                        if (unlikely(ops_enqd > ops_needed)) {
                                size_t nb_b_to_mov = ops_unused * sizeof(
                                                struct rte_crypto_op *);

                                memmove(&ops[ops_needed], &ops[ops_enqd],
                                        nb_b_to_mov);
                        }

#ifdef CPERF_LINEARIZATION_ENABLE
                        if (linearize) {
                                /* PMD doesn't support scatter-gather and source buffer
                                 * is segmented.
                                 * We need to linearize it before enqueuing.
                                 */
                                for (i = 0; i < burst_size; i++)
                                        rte_pktmbuf_linearize(ops[i]->sym->m_src);
                        }
#endif /* CPERF_LINEARIZATION_ENABLE */

                        /* Enqueue burst of ops on crypto device */
                        ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
                                        ops, burst_size);
                        if (ops_enqd < burst_size)
                                ops_enqd_failed++;

                        /**
                         * Calculate number of ops not enqueued (mainly for hw
                         * accelerators whose ingress queue can fill up).
                         */
                        ops_unused = burst_size - ops_enqd;
                        ops_enqd_total += ops_enqd;

                        /* Dequeue processed burst of ops from crypto device */
                        ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
                                        ops_processed, test_burst_size);

                        if (likely(ops_deqd)) {
                                /* Free crypto ops so they can be reused. We don't
                                 * free the mbufs here, as we don't want to reuse
                                 * them: the crypto operation changes their data
                                 * and would cause failures.
                                 */
                                rte_mempool_put_bulk(ctx->crypto_op_pool,
                                                (void **)ops_processed, ops_deqd);

                                ops_deqd_total += ops_deqd;
                        } else {
                                /**
                                 * Count dequeue polls which didn't return any
                                 * processed operations. This statistic is mainly
                                 * relevant to hw accelerators.
                                 */
                                ops_deqd_failed++;
                        }

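                        /*
                         * Advance the mbuf index, wrapping to the start of the
                         * pool when the next burst would run past the end.
                         */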
                        m_idx += ops_needed;
                        m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
                                        0 : m_idx;
                }

                /* Dequeue any operations still in the crypto device */

                while (ops_deqd_total < ctx->options->total_ops) {
                        /* Sending 0 length burst to flush sw crypto device */
                        rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

                        /* dequeue burst */
                        ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
                                        ops_processed, test_burst_size);
                        if (ops_deqd == 0)
                                ops_deqd_failed++;
                        else {
                                rte_mempool_put_bulk(ctx->crypto_op_pool,
                                                (void **)ops_processed, ops_deqd);

                                ops_deqd_total += ops_deqd;
                        }
                }

                tsc_end = rte_rdtsc_precise();
                tsc_duration = (tsc_end - tsc_start);

                /* Calculate average operations processed per second */
                double ops_per_second = ((double)ctx->options->total_ops /
                                tsc_duration) * rte_get_tsc_hz();

                /* Calculate average throughput in Gbps */
                double throughput_gbps = ((ops_per_second *
                                ctx->options->test_buffer_size * 8) / 1000000000);

                /* Calculate average cycles per packet */
                double cycles_per_packet = ((double)tsc_duration /
                                ctx->options->total_ops);

                if (!ctx->options->csv) {
                        if (!only_once)
                                printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
                                        "lcore id", "Buf Size", "Burst Size",
                                        "Enqueued", "Dequeued", "Failed Enq",
                                        "Failed Deq", "MOps", "Gbps",
                                        "Cycles/Buf");
                        only_once = 1;

                        printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
                                        "%12"PRIu64"%12.4f%12.4f%12.2f\n",
                                        ctx->lcore_id,
                                        ctx->options->test_buffer_size,
                                        test_burst_size,
                                        ops_enqd_total,
                                        ops_deqd_total,
                                        ops_enqd_failed,
                                        ops_deqd_failed,
                                        ops_per_second/1000000,
                                        throughput_gbps,
                                        cycles_per_packet);
                } else {
                        if (!only_once)
                                printf("# lcore id, Buffer Size(B),"
                                        "Burst Size,Enqueued,Dequeued,Failed Enq,"
                                        "Failed Deq,Ops(Millions),Throughput(Gbps),"
                                        "Cycles/Buf\n\n");
                        only_once = 1;

                        printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
                                        "%.3f;%.3f;%.3f\n",
                                        ctx->lcore_id,
                                        ctx->options->test_buffer_size,
                                        test_burst_size,
                                        ops_enqd_total,
                                        ops_deqd_total,
                                        ops_enqd_failed,
                                        ops_deqd_failed,
                                        ops_per_second/1000000,
                                        throughput_gbps,
                                        cycles_per_packet);
                }


                /* Get next size from range or list */
                if (ctx->options->inc_burst_size != 0)
                        test_burst_size += ctx->options->inc_burst_size;
                else {
                        if (++burst_size_idx == ctx->options->burst_size_count)
                                break;
                        test_burst_size = ctx->options->burst_size_list[burst_size_idx];
                }

        }

        return 0;
}

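/* Test destructor: free all resources allocated by the constructor. */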
void
cperf_throughput_test_destructor(void *arg)
{
        struct cperf_throughput_ctx *ctx = arg;

        if (ctx == NULL)
                return;

        cperf_throughput_test_free(ctx, ctx->options->pool_sz);
}