/* app/test-crypto-perf/cperf_test_verify.c (dpdk.git) */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_verify.h"
#include "cperf_ops.h"

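/*
 * Per-lcore counters gathered while the verify test runs and reported by
 * the destructor: operations enqueued/dequeued, enqueue bursts that could
 * not be fully enqueued, empty dequeue polls, and operations whose output
 * did not match the test vector.
 */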
struct cperf_verify_results {
        uint64_t ops_enqueued;
        uint64_t ops_dequeued;

        uint64_t ops_enqueued_failed;
        uint64_t ops_dequeued_failed;

        uint64_t ops_failed;
};

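/*
 * State shared between the constructor, runner and destructor of the
 * verify test: the crypto device/queue pair under test, the mbuf and
 * crypto op pools, the pre-built source/destination mbufs, the symmetric
 * session and the accumulated results.
 */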
struct cperf_verify_ctx {
        uint8_t dev_id;
        uint16_t qp_id;
        uint8_t lcore_id;

        struct rte_mempool *pkt_mbuf_pool_in;
        struct rte_mempool *pkt_mbuf_pool_out;
        struct rte_mbuf **mbufs_in;
        struct rte_mbuf **mbufs_out;

        struct rte_mempool *crypto_op_pool;

        struct rte_cryptodev_sym_session *sess;

        cperf_populate_ops_t populate_ops;

        const struct cperf_options *options;
        const struct cperf_test_vector *test_vector;
        struct cperf_verify_results results;
};

struct cperf_op_result {
        enum rte_crypto_op_status status;
};

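/*
 * Release everything owned by a verify test context. mbuf_nb bounds how
 * many entries of the mbuf arrays were actually allocated, so this can
 * also be used to unwind a partially built context from the constructor's
 * error path.
 */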
static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
{
        uint32_t i;

        if (ctx) {
                if (ctx->sess)
                        rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);

                if (ctx->mbufs_in) {
                        for (i = 0; i < mbuf_nb; i++)
                                rte_pktmbuf_free(ctx->mbufs_in[i]);

                        rte_free(ctx->mbufs_in);
                }

                if (ctx->mbufs_out) {
                        for (i = 0; i < mbuf_nb; i++) {
                                if (ctx->mbufs_out[i] != NULL)
                                        rte_pktmbuf_free(ctx->mbufs_out[i]);
                        }

                        rte_free(ctx->mbufs_out);
                }

                if (ctx->pkt_mbuf_pool_in)
                        rte_mempool_free(ctx->pkt_mbuf_pool_in);

                if (ctx->pkt_mbuf_pool_out)
                        rte_mempool_free(ctx->pkt_mbuf_pool_out);

                if (ctx->crypto_op_pool)
                        rte_mempool_free(ctx->crypto_op_pool);

                rte_free(ctx);
        }
}

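/*
 * Build one source/destination mbuf for the test: a chain of segments_nb
 * segments filled with the vector's plaintext (for encryption) or
 * ciphertext (for decryption), plus room for the digest when an auth or
 * AEAD operation is configured, and prepended AAD space for AEAD.
 */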
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
                uint32_t segments_nb,
                const struct cperf_options *options,
                const struct cperf_test_vector *test_vector)
{
        struct rte_mbuf *mbuf;
        uint32_t segment_sz = options->buffer_sz / segments_nb;
        uint32_t last_sz = options->buffer_sz % segments_nb;
        uint8_t *mbuf_data;
        uint8_t *test_data =
                        (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                                        test_vector->plaintext.data :
                                        test_vector->ciphertext.data;

        mbuf = rte_pktmbuf_alloc(mempool);
        if (mbuf == NULL)
                goto error;

        mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
        if (mbuf_data == NULL)
                goto error;

        memcpy(mbuf_data, test_data, segment_sz);
        test_data += segment_sz;
        segments_nb--;

        while (segments_nb) {
                struct rte_mbuf *m;

                m = rte_pktmbuf_alloc(mempool);
                if (m == NULL)
                        goto error;

                rte_pktmbuf_chain(mbuf, m);

                mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
                if (mbuf_data == NULL)
                        goto error;

                memcpy(mbuf_data, test_data, segment_sz);
                test_data += segment_sz;
                segments_nb--;
        }

        if (last_sz) {
                mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
                if (mbuf_data == NULL)
                        goto error;

                memcpy(mbuf_data, test_data, last_sz);
        }

        if (options->op_type != CPERF_CIPHER_ONLY) {
                mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
                                options->auth_digest_sz);
                if (mbuf_data == NULL)
                        goto error;
        }

        if (options->op_type == CPERF_AEAD) {
                uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
                        RTE_ALIGN_CEIL(options->auth_aad_sz, 16));

                if (aead == NULL)
                        goto error;

                memcpy(aead, test_vector->aad.data, test_vector->aad.length);
        }

        return mbuf;
error:
        if (mbuf != NULL)
                rte_pktmbuf_free(mbuf);

        return NULL;
}

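/*
 * Set up the verify test for one crypto device / queue pair: create the
 * session, the input (and, for out-of-place operation, output) mbuf pools
 * and pre-populated mbufs, and the crypto operation pool. Returns the
 * context passed to the runner and destructor, or NULL on failure.
 */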
void *
cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
                const struct cperf_options *options,
                const struct cperf_test_vector *test_vector,
                const struct cperf_op_fns *op_fns)
{
        struct cperf_verify_ctx *ctx = NULL;
        unsigned int mbuf_idx = 0;
        char pool_name[32] = "";

        ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
        if (ctx == NULL)
                goto err;

        ctx->dev_id = dev_id;
        ctx->qp_id = qp_id;

        ctx->populate_ops = op_fns->populate_ops;
        ctx->options = options;
        ctx->test_vector = test_vector;

        ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
        if (ctx->sess == NULL)
                goto err;

        snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
                        dev_id);

        ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
                        options->pool_sz * options->segments_nb, 0, 0,
                        RTE_PKTMBUF_HEADROOM +
                        RTE_CACHE_LINE_ROUNDUP(
                                (options->buffer_sz / options->segments_nb) +
                                (options->buffer_sz % options->segments_nb) +
                                        options->auth_digest_sz),
                        rte_socket_id());

        if (ctx->pkt_mbuf_pool_in == NULL)
                goto err;

        /* Generate mbufs_in with plaintext populated for test */
        if (ctx->options->pool_sz % ctx->options->burst_sz)
                goto err;

        ctx->mbufs_in = rte_malloc(NULL,
                        (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
        if (ctx->mbufs_in == NULL)
                goto err;

        for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
                ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
                                ctx->pkt_mbuf_pool_in, options->segments_nb,
                                options, test_vector);
                if (ctx->mbufs_in[mbuf_idx] == NULL)
                        goto err;
        }

        if (options->out_of_place == 1) {

                snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
                                dev_id);

                ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
                                pool_name, options->pool_sz, 0, 0,
                                RTE_PKTMBUF_HEADROOM +
                                RTE_CACHE_LINE_ROUNDUP(
                                        options->buffer_sz +
                                        options->auth_digest_sz),
                                rte_socket_id());

                if (ctx->pkt_mbuf_pool_out == NULL)
                        goto err;
        }

        ctx->mbufs_out = rte_malloc(NULL,
                        (sizeof(struct rte_mbuf *) *
                        ctx->options->pool_sz), 0);
        if (ctx->mbufs_out == NULL)
                goto err;

        for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
                if (options->out_of_place == 1) {
                        ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
                                        ctx->pkt_mbuf_pool_out, 1,
                                        options, test_vector);
                        if (ctx->mbufs_out[mbuf_idx] == NULL)
                                goto err;
                } else {
                        ctx->mbufs_out[mbuf_idx] = NULL;
                }
        }

        snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
                        dev_id);

        ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
                        RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
                        rte_socket_id());
        if (ctx->crypto_op_pool == NULL)
                goto err;

        return ctx;
err:
        cperf_verify_test_free(ctx, mbuf_idx);

        return NULL;
}

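/*
 * Check a completed operation against the test vector: flatten the
 * destination (or source, for in-place) mbuf chain into a temporary
 * buffer and compare the ciphertext/plaintext and, when applicable, the
 * generated digest. Returns 0 on match, non-zero on any mismatch or error.
 */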
static int
cperf_verify_op(struct rte_crypto_op *op,
                const struct cperf_options *options,
                const struct cperf_test_vector *vector)
{
        const struct rte_mbuf *m;
        uint32_t len;
        uint16_t nb_segs;
        uint8_t *data;
        uint32_t cipher_offset, auth_offset;
        uint8_t cipher, auth;
        int res = 0;

        if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
                return 1;

        /* Compute the total length of the (possibly chained) result buffer */
        if (op->sym->m_dst)
                m = op->sym->m_dst;
        else
                m = op->sym->m_src;
        nb_segs = m->nb_segs;
        len = 0;
        while (m && nb_segs != 0) {
                len += m->data_len;
                m = m->next;
                nb_segs--;
        }

        data = rte_malloc(NULL, len, 0);
        if (data == NULL)
                return 1;

        /* Flatten the segments into the temporary buffer */
        if (op->sym->m_dst)
                m = op->sym->m_dst;
        else
                m = op->sym->m_src;
        nb_segs = m->nb_segs;
        len = 0;
        while (m && nb_segs != 0) {
                memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
                                m->data_len);
                len += m->data_len;
                m = m->next;
                nb_segs--;
        }

        switch (options->op_type) {
        case CPERF_CIPHER_ONLY:
                cipher = 1;
                cipher_offset = 0;
                auth = 0;
                auth_offset = 0;
                break;
        case CPERF_CIPHER_THEN_AUTH:
                cipher = 1;
                cipher_offset = 0;
                auth = 1;
                auth_offset = vector->plaintext.length;
                break;
        case CPERF_AUTH_ONLY:
                cipher = 0;
                cipher_offset = 0;
                auth = 1;
                auth_offset = vector->plaintext.length;
                break;
        case CPERF_AUTH_THEN_CIPHER:
                cipher = 1;
                cipher_offset = 0;
                auth = 1;
                auth_offset = vector->plaintext.length;
                break;
        case CPERF_AEAD:
                cipher = 1;
                cipher_offset = vector->aad.length;
                auth = 1;
                auth_offset = vector->aad.length + vector->plaintext.length;
                break;
        default:
                /* Unknown operation type: treat as a verification failure */
                rte_free(data);
                return 1;
        }

        if (cipher == 1) {
                if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
                        res += memcmp(data + cipher_offset,
                                        vector->ciphertext.data,
                                        vector->ciphertext.length);
                else
                        res += memcmp(data + cipher_offset,
                                        vector->plaintext.data,
                                        vector->plaintext.length);
        }

        if (auth == 1) {
                if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
                        res += memcmp(data + auth_offset,
                                        vector->digest.data,
                                        vector->digest.length);
        }

        rte_free(data);

        return !!res;
}

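/*
 * Main loop of the verify test, run on one lcore: enqueue bursts of
 * pre-built operations, dequeue completed ones and verify each against
 * the test vector, then drain whatever is still in flight once the
 * requested total has been enqueued.
 */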
int
cperf_verify_test_runner(void *test_ctx)
{
        struct cperf_verify_ctx *ctx = test_ctx;

        uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
        uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;

        uint64_t i, m_idx = 0;
        uint16_t ops_unused = 0;

        struct rte_crypto_op *ops[ctx->options->burst_sz];
        struct rte_crypto_op *ops_processed[ctx->options->burst_sz];

        uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
        struct rte_cryptodev_info dev_info;
        int linearize = 0;

        /* Check if source mbufs require coalescing */
        if (ctx->options->segments_nb > 1) {
                rte_cryptodev_info_get(ctx->dev_id, &dev_info);
                if ((dev_info.feature_flags &
                                RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
                        linearize = 1;
        }
#endif /* CPERF_LINEARIZATION_ENABLE */

        ctx->lcore_id = lcore;

        if (!ctx->options->csv)
                printf("\n# Running verify test on device: %u, lcore: %u\n",
                        ctx->dev_id, lcore);

        while (ops_enqd_total < ctx->options->total_ops) {

                uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
                                <= ctx->options->total_ops) ?
                                                ctx->options->burst_sz :
                                                ctx->options->total_ops -
                                                ops_enqd_total;

                uint16_t ops_needed = burst_size - ops_unused;

                /* Allocate crypto ops from pool */
                if (ops_needed != rte_crypto_op_bulk_alloc(
                                ctx->crypto_op_pool,
                                RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                                ops, ops_needed))
                        return -1;

                /* Setup crypto op, attach mbuf etc */
                (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
                                &ctx->mbufs_out[m_idx],
                                ops_needed, ctx->sess, ctx->options,
                                ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
                if (linearize) {
                        /* PMD doesn't support scatter-gather and source buffer
                         * is segmented.
                         * We need to linearize it before enqueuing.
                         */
                        for (i = 0; i < burst_size; i++)
                                rte_pktmbuf_linearize(ops[i]->sym->m_src);
                }
#endif /* CPERF_LINEARIZATION_ENABLE */

                /* Enqueue burst of ops on crypto device */
                ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
                                ops, burst_size);
                if (ops_enqd < burst_size)
                        ops_enqd_failed++;

                /**
                 * Calculate number of ops not enqueued (mainly for hw
                 * accelerators whose ingress queue can fill up).
                 */
                ops_unused = burst_size - ops_enqd;
                ops_enqd_total += ops_enqd;

                /* Dequeue processed burst of ops from crypto device */
                ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
                                ops_processed, ctx->options->burst_sz);

                m_idx += ops_needed;
                if (m_idx + ctx->options->burst_sz > ctx->options->pool_sz)
                        m_idx = 0;

                if (ops_deqd == 0) {
                        /**
                         * Count dequeue polls which didn't return any
                         * processed operations. This statistic is mainly
                         * relevant to hw accelerators.
                         */
                        ops_deqd_failed++;
                        continue;
                }

                for (i = 0; i < ops_deqd; i++) {
                        if (cperf_verify_op(ops_processed[i], ctx->options,
                                                ctx->test_vector))
                                ctx->results.ops_failed++;
                        /* free crypto ops so they can be reused. We don't free
                         * the mbufs here as we don't want to reuse them as
                         * the crypto operation will change the data and cause
                         * failures.
                         */
                        rte_crypto_op_free(ops_processed[i]);
                }

                ops_deqd_total += ops_deqd;
        }

        /* Dequeue any operations still in the crypto device */
        while (ops_deqd_total < ctx->options->total_ops) {
                /* Sending 0 length burst to flush sw crypto device */
                rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

                /* dequeue burst */
                ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
                                ops_processed, ctx->options->burst_sz);
                if (ops_deqd == 0) {
                        ops_deqd_failed++;
                        continue;
                }

                for (i = 0; i < ops_deqd; i++) {
                        if (cperf_verify_op(ops_processed[i], ctx->options,
                                                ctx->test_vector))
                                ctx->results.ops_failed++;
                        /* free crypto ops so they can be reused. We don't free
                         * the mbufs here as we don't want to reuse them as
                         * the crypto operation will change the data and cause
                         * failures.
                         */
                        rte_crypto_op_free(ops_processed[i]);
                }

                ops_deqd_total += ops_deqd;
        }

        ctx->results.ops_enqueued = ops_enqd_total;
        ctx->results.ops_dequeued = ops_deqd_total;

        ctx->results.ops_enqueued_failed = ops_enqd_failed;
        ctx->results.ops_dequeued_failed = ops_deqd_failed;

        return 0;
}

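/*
 * Print the per-lcore statistics (human-readable or CSV, depending on the
 * options) and free the test context.
 */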
void
cperf_verify_test_destructor(void *arg)
{
        struct cperf_verify_ctx *ctx = arg;
        struct cperf_verify_results *results;
        static int only_once;

        if (ctx == NULL)
                return;

        results = &ctx->results;

        if (!ctx->options->csv) {
                printf("\n# Device %d on lcore %u\n",
                                ctx->dev_id, ctx->lcore_id);
                printf("# Buffer Size(B)\t  Enqueued\t  Dequeued\tFailed Enq"
                                "\tFailed Deq\tFailed Ops\n");

                printf("\n%16u\t%10"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
                                "%10"PRIu64"\t%10"PRIu64"\n",
                                ctx->options->buffer_sz,
                                results->ops_enqueued,
                                results->ops_dequeued,
                                results->ops_enqueued_failed,
                                results->ops_dequeued_failed,
                                results->ops_failed);
        } else {
                if (!only_once)
                        printf("\n# CPU lcore id, Burst Size(B), "
                                "Buffer Size(B),Enqueued,Dequeued,Failed Enq,"
                                "Failed Deq,Failed Ops\n");
                only_once = 1;

                printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
                                "%"PRIu64"\n",
                                ctx->lcore_id,
                                ctx->options->burst_sz,
                                ctx->options->buffer_sz,
                                results->ops_enqueued,
                                results->ops_dequeued,
                                results->ops_enqueued_failed,
                                results->ops_dequeued_failed,
                                results->ops_failed);
        }

        cperf_verify_test_free(ctx, ctx->options->pool_sz);
}
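
/*
 * Illustrative call sequence only (the real wiring lives in the
 * test-crypto-perf application's main loop, not in this file): the
 * framework is expected to call the constructor once per device/queue
 * pair, launch the runner on a worker lcore, and finally invoke the
 * destructor to report and clean up. The variables below are placeholders
 * for values the application would already hold.
 *
 *     void *ctx = cperf_verify_test_constructor(dev_id, qp_id,
 *                     options, test_vector, op_fns);
 *     if (ctx != NULL) {
 *             rte_eal_remote_launch(cperf_verify_test_runner, ctx, lcore_id);
 *             rte_eal_mp_wait_lcore();
 *             cperf_verify_test_destructor(ctx);
 *     }
 */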