cryptodev: add auth IV
[dpdk.git] / app / test-crypto-perf / cperf_test_throughput.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <rte_malloc.h>
34 #include <rte_cycles.h>
35 #include <rte_crypto.h>
36 #include <rte_cryptodev.h>
37
38 #include "cperf_test_throughput.h"
39 #include "cperf_ops.h"
40
/* Per-lcore state for one throughput test run on a device/queue pair. */
struct cperf_throughput_ctx {
	uint8_t dev_id;		/* crypto device under test */
	uint16_t qp_id;		/* queue pair on dev_id driven by this lcore */
	uint8_t lcore_id;	/* lcore executing the test (set by the runner) */

	struct rte_mempool *pkt_mbuf_pool_in;	/* pool backing the source mbufs */
	struct rte_mempool *pkt_mbuf_pool_out;	/* pool backing the destination mbufs (out-of-place only) */
	struct rte_mbuf **mbufs_in;	/* pool_sz pre-populated source buffers */
	struct rte_mbuf **mbufs_out;	/* pool_sz destination buffers; entries are NULL for in-place */

	struct rte_mempool *crypto_op_pool;	/* crypto ops; per-op private area holds the IVs */

	struct rte_cryptodev_sym_session *sess;	/* symmetric session shared by all ops */

	cperf_populate_ops_t populate_ops;	/* op-type specific callback that fills each burst */

	const struct cperf_options *options;		/* test configuration (not owned) */
	const struct cperf_test_vector *test_vector;	/* reference data (not owned) */
};
60
61 static void
62 cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
63 {
64         uint32_t i;
65
66         if (ctx) {
67                 if (ctx->sess)
68                         rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
69
70                 if (ctx->mbufs_in) {
71                         for (i = 0; i < mbuf_nb; i++)
72                                 rte_pktmbuf_free(ctx->mbufs_in[i]);
73
74                         rte_free(ctx->mbufs_in);
75                 }
76
77                 if (ctx->mbufs_out) {
78                         for (i = 0; i < mbuf_nb; i++) {
79                                 if (ctx->mbufs_out[i] != NULL)
80                                         rte_pktmbuf_free(ctx->mbufs_out[i]);
81                         }
82
83                         rte_free(ctx->mbufs_out);
84                 }
85
86                 if (ctx->pkt_mbuf_pool_in)
87                         rte_mempool_free(ctx->pkt_mbuf_pool_in);
88
89                 if (ctx->pkt_mbuf_pool_out)
90                         rte_mempool_free(ctx->pkt_mbuf_pool_out);
91
92                 if (ctx->crypto_op_pool)
93                         rte_mempool_free(ctx->crypto_op_pool);
94
95                 rte_free(ctx);
96         }
97 }
98
99 static struct rte_mbuf *
100 cperf_mbuf_create(struct rte_mempool *mempool,
101                 uint32_t segments_nb,
102                 const struct cperf_options *options,
103                 const struct cperf_test_vector *test_vector)
104 {
105         struct rte_mbuf *mbuf;
106         uint32_t segment_sz = options->max_buffer_size / segments_nb;
107         uint32_t last_sz = options->max_buffer_size % segments_nb;
108         uint8_t *mbuf_data;
109         uint8_t *test_data =
110                         (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
111                                         test_vector->plaintext.data :
112                                         test_vector->ciphertext.data;
113
114         mbuf = rte_pktmbuf_alloc(mempool);
115         if (mbuf == NULL)
116                 goto error;
117
118         mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
119         if (mbuf_data == NULL)
120                 goto error;
121
122         memcpy(mbuf_data, test_data, segment_sz);
123         test_data += segment_sz;
124         segments_nb--;
125
126         while (segments_nb) {
127                 struct rte_mbuf *m;
128
129                 m = rte_pktmbuf_alloc(mempool);
130                 if (m == NULL)
131                         goto error;
132
133                 rte_pktmbuf_chain(mbuf, m);
134
135                 mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
136                 if (mbuf_data == NULL)
137                         goto error;
138
139                 memcpy(mbuf_data, test_data, segment_sz);
140                 test_data += segment_sz;
141                 segments_nb--;
142         }
143
144         if (last_sz) {
145                 mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
146                 if (mbuf_data == NULL)
147                         goto error;
148
149                 memcpy(mbuf_data, test_data, last_sz);
150         }
151
152         if (options->op_type != CPERF_CIPHER_ONLY) {
153                 mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
154                                 options->auth_digest_sz);
155                 if (mbuf_data == NULL)
156                         goto error;
157         }
158
159         if (options->op_type == CPERF_AEAD) {
160                 uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
161                         RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
162
163                 if (aead == NULL)
164                         goto error;
165
166                 memcpy(aead, test_vector->aad.data, test_vector->aad.length);
167         }
168
169         return mbuf;
170 error:
171         if (mbuf != NULL)
172                 rte_pktmbuf_free(mbuf);
173
174         return NULL;
175 }
176
177 void *
178 cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
179                 const struct cperf_options *options,
180                 const struct cperf_test_vector *test_vector,
181                 const struct cperf_op_fns *op_fns)
182 {
183         struct cperf_throughput_ctx *ctx = NULL;
184         unsigned int mbuf_idx = 0;
185         char pool_name[32] = "";
186
187         ctx = rte_malloc(NULL, sizeof(struct cperf_throughput_ctx), 0);
188         if (ctx == NULL)
189                 goto err;
190
191         ctx->dev_id = dev_id;
192         ctx->qp_id = qp_id;
193
194         ctx->populate_ops = op_fns->populate_ops;
195         ctx->options = options;
196         ctx->test_vector = test_vector;
197
198         /* IV goes at the end of the cryptop operation */
199         uint16_t iv_offset = sizeof(struct rte_crypto_op) +
200                 sizeof(struct rte_crypto_sym_op);
201
202         ctx->sess = op_fns->sess_create(dev_id, options, test_vector, iv_offset);
203         if (ctx->sess == NULL)
204                 goto err;
205
206         snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
207                         dev_id);
208
209         ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
210                         options->pool_sz * options->segments_nb, 0, 0,
211                         RTE_PKTMBUF_HEADROOM +
212                         RTE_CACHE_LINE_ROUNDUP(
213                                 (options->max_buffer_size / options->segments_nb) +
214                                 (options->max_buffer_size % options->segments_nb) +
215                                         options->auth_digest_sz),
216                         rte_socket_id());
217
218         if (ctx->pkt_mbuf_pool_in == NULL)
219                 goto err;
220
221         /* Generate mbufs_in with plaintext populated for test */
222         ctx->mbufs_in = rte_malloc(NULL,
223                         (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
224
225         for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
226                 ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
227                                 ctx->pkt_mbuf_pool_in, options->segments_nb,
228                                 options, test_vector);
229                 if (ctx->mbufs_in[mbuf_idx] == NULL)
230                         goto err;
231         }
232
233         if (options->out_of_place == 1) {
234
235                 snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
236                                 dev_id);
237
238                 ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
239                                 pool_name, options->pool_sz, 0, 0,
240                                 RTE_PKTMBUF_HEADROOM +
241                                 RTE_CACHE_LINE_ROUNDUP(
242                                         options->max_buffer_size +
243                                         options->auth_digest_sz),
244                                 rte_socket_id());
245
246                 if (ctx->pkt_mbuf_pool_out == NULL)
247                         goto err;
248         }
249
250         ctx->mbufs_out = rte_malloc(NULL,
251                         (sizeof(struct rte_mbuf *) *
252                         ctx->options->pool_sz), 0);
253
254         for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
255                 if (options->out_of_place == 1) {
256                         ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
257                                         ctx->pkt_mbuf_pool_out, 1,
258                                         options, test_vector);
259                         if (ctx->mbufs_out[mbuf_idx] == NULL)
260                                 goto err;
261                 } else {
262                         ctx->mbufs_out[mbuf_idx] = NULL;
263                 }
264         }
265
266         snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
267                         dev_id);
268
269         uint16_t priv_size = test_vector->cipher_iv.length +
270                 test_vector->auth_iv.length;
271
272         ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
273                         RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
274                         512, priv_size, rte_socket_id());
275         if (ctx->crypto_op_pool == NULL)
276                 goto err;
277
278         return ctx;
279 err:
280         cperf_throughput_test_free(ctx, mbuf_idx);
281
282         return NULL;
283 }
284
285 int
286 cperf_throughput_test_runner(void *test_ctx)
287 {
288         struct cperf_throughput_ctx *ctx = test_ctx;
289         uint16_t test_burst_size;
290         uint8_t burst_size_idx = 0;
291
292         static int only_once;
293
294         struct rte_crypto_op *ops[ctx->options->max_burst_size];
295         struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
296         uint64_t i;
297
298         uint32_t lcore = rte_lcore_id();
299
300 #ifdef CPERF_LINEARIZATION_ENABLE
301         struct rte_cryptodev_info dev_info;
302         int linearize = 0;
303
304         /* Check if source mbufs require coalescing */
305         if (ctx->options->segments_nb > 1) {
306                 rte_cryptodev_info_get(ctx->dev_id, &dev_info);
307                 if ((dev_info.feature_flags &
308                                 RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
309                         linearize = 1;
310         }
311 #endif /* CPERF_LINEARIZATION_ENABLE */
312
313         ctx->lcore_id = lcore;
314
315         /* Warm up the host CPU before starting the test */
316         for (i = 0; i < ctx->options->total_ops; i++)
317                 rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
318
319         /* Get first size from range or list */
320         if (ctx->options->inc_burst_size != 0)
321                 test_burst_size = ctx->options->min_burst_size;
322         else
323                 test_burst_size = ctx->options->burst_size_list[0];
324
325         uint16_t iv_offset = sizeof(struct rte_crypto_op) +
326                 sizeof(struct rte_crypto_sym_op);
327
328         while (test_burst_size <= ctx->options->max_burst_size) {
329                 uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
330                 uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
331
332                 uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
333
334                 uint16_t ops_unused = 0;
335
336                 tsc_start = rte_rdtsc_precise();
337
338                 while (ops_enqd_total < ctx->options->total_ops) {
339
340                         uint16_t burst_size = ((ops_enqd_total + test_burst_size)
341                                         <= ctx->options->total_ops) ?
342                                                         test_burst_size :
343                                                         ctx->options->total_ops -
344                                                         ops_enqd_total;
345
346                         uint16_t ops_needed = burst_size - ops_unused;
347
348                         /* Allocate crypto ops from pool */
349                         if (ops_needed != rte_crypto_op_bulk_alloc(
350                                         ctx->crypto_op_pool,
351                                         RTE_CRYPTO_OP_TYPE_SYMMETRIC,
352                                         ops, ops_needed))
353                                 return -1;
354
355                         /* Setup crypto op, attach mbuf etc */
356                         (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
357                                         &ctx->mbufs_out[m_idx],
358                                         ops_needed, ctx->sess, ctx->options,
359                                         ctx->test_vector, iv_offset);
360
361                         /**
362                          * When ops_needed is smaller than ops_enqd, the
363                          * unused ops need to be moved to the front for
364                          * next round use.
365                          */
366                         if (unlikely(ops_enqd > ops_needed)) {
367                                 size_t nb_b_to_mov = ops_unused * sizeof(
368                                                 struct rte_crypto_op *);
369
370                                 memmove(&ops[ops_needed], &ops[ops_enqd],
371                                         nb_b_to_mov);
372                         }
373
374 #ifdef CPERF_LINEARIZATION_ENABLE
375                         if (linearize) {
376                                 /* PMD doesn't support scatter-gather and source buffer
377                                  * is segmented.
378                                  * We need to linearize it before enqueuing.
379                                  */
380                                 for (i = 0; i < burst_size; i++)
381                                         rte_pktmbuf_linearize(ops[i]->sym->m_src);
382                         }
383 #endif /* CPERF_LINEARIZATION_ENABLE */
384
385                         /* Enqueue burst of ops on crypto device */
386                         ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
387                                         ops, burst_size);
388                         if (ops_enqd < burst_size)
389                                 ops_enqd_failed++;
390
391                         /**
392                          * Calculate number of ops not enqueued (mainly for hw
393                          * accelerators whose ingress queue can fill up).
394                          */
395                         ops_unused = burst_size - ops_enqd;
396                         ops_enqd_total += ops_enqd;
397
398
399                         /* Dequeue processed burst of ops from crypto device */
400                         ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
401                                         ops_processed, test_burst_size);
402
403                         if (likely(ops_deqd))  {
404                                 /* free crypto ops so they can be reused. We don't free
405                                  * the mbufs here as we don't want to reuse them as
406                                  * the crypto operation will change the data and cause
407                                  * failures.
408                                  */
409                                 rte_mempool_put_bulk(ctx->crypto_op_pool,
410                                                 (void **)ops_processed, ops_deqd);
411
412                                 ops_deqd_total += ops_deqd;
413                         } else {
414                                 /**
415                                  * Count dequeue polls which didn't return any
416                                  * processed operations. This statistic is mainly
417                                  * relevant to hw accelerators.
418                                  */
419                                 ops_deqd_failed++;
420                         }
421
422                         m_idx += ops_needed;
423                         m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
424                                         0 : m_idx;
425                 }
426
427                 /* Dequeue any operations still in the crypto device */
428
429                 while (ops_deqd_total < ctx->options->total_ops) {
430                         /* Sending 0 length burst to flush sw crypto device */
431                         rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
432
433                         /* dequeue burst */
434                         ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
435                                         ops_processed, test_burst_size);
436                         if (ops_deqd == 0)
437                                 ops_deqd_failed++;
438                         else {
439                                 rte_mempool_put_bulk(ctx->crypto_op_pool,
440                                                 (void **)ops_processed, ops_deqd);
441
442                                 ops_deqd_total += ops_deqd;
443                         }
444                 }
445
446                 tsc_end = rte_rdtsc_precise();
447                 tsc_duration = (tsc_end - tsc_start);
448
449                 /* Calculate average operations processed per second */
450                 double ops_per_second = ((double)ctx->options->total_ops /
451                                 tsc_duration) * rte_get_tsc_hz();
452
453                 /* Calculate average throughput (Gbps) in bits per second */
454                 double throughput_gbps = ((ops_per_second *
455                                 ctx->options->test_buffer_size * 8) / 1000000000);
456
457                 /* Calculate average cycles per packet */
458                 double cycles_per_packet = ((double)tsc_duration /
459                                 ctx->options->total_ops);
460
461                 if (!ctx->options->csv) {
462                         if (!only_once)
463                                 printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
464                                         "lcore id", "Buf Size", "Burst Size",
465                                         "Enqueued", "Dequeued", "Failed Enq",
466                                         "Failed Deq", "MOps", "Gbps",
467                                         "Cycles/Buf");
468                         only_once = 1;
469
470                         printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
471                                         "%12"PRIu64"%12.4f%12.4f%12.2f\n",
472                                         ctx->lcore_id,
473                                         ctx->options->test_buffer_size,
474                                         test_burst_size,
475                                         ops_enqd_total,
476                                         ops_deqd_total,
477                                         ops_enqd_failed,
478                                         ops_deqd_failed,
479                                         ops_per_second/1000000,
480                                         throughput_gbps,
481                                         cycles_per_packet);
482                 } else {
483                         if (!only_once)
484                                 printf("# lcore id, Buffer Size(B),"
485                                         "Burst Size,Enqueued,Dequeued,Failed Enq,"
486                                         "Failed Deq,Ops(Millions),Throughput(Gbps),"
487                                         "Cycles/Buf\n\n");
488                         only_once = 1;
489
490                         printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
491                                         "%.f3;%.f3;%.f3\n",
492                                         ctx->lcore_id,
493                                         ctx->options->test_buffer_size,
494                                         test_burst_size,
495                                         ops_enqd_total,
496                                         ops_deqd_total,
497                                         ops_enqd_failed,
498                                         ops_deqd_failed,
499                                         ops_per_second/1000000,
500                                         throughput_gbps,
501                                         cycles_per_packet);
502                 }
503
504                 /* Get next size from range or list */
505                 if (ctx->options->inc_burst_size != 0)
506                         test_burst_size += ctx->options->inc_burst_size;
507                 else {
508                         if (++burst_size_idx == ctx->options->burst_size_count)
509                                 break;
510                         test_burst_size = ctx->options->burst_size_list[burst_size_idx];
511                 }
512
513         }
514
515         return 0;
516 }
517
518
519 void
520 cperf_throughput_test_destructor(void *arg)
521 {
522         struct cperf_throughput_ctx *ctx = arg;
523
524         if (ctx == NULL)
525                 return;
526
527         cperf_throughput_test_free(ctx, ctx->options->pool_sz);
528 }