c57d6722b83b6d758ed35637b94b705cfffa5091
[dpdk.git] / app / test-crypto-perf / cperf_test_verify.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <rte_malloc.h>
34 #include <rte_cycles.h>
35 #include <rte_crypto.h>
36 #include <rte_cryptodev.h>
37
38 #include "cperf_test_verify.h"
39 #include "cperf_ops.h"
40 #include "cperf_test_common.h"
41
/*
 * Per-lcore context for the verify test. Created by
 * cperf_verify_test_constructor() and released by
 * cperf_verify_test_free() / cperf_verify_test_destructor().
 */
struct cperf_verify_ctx {
	uint8_t dev_id;		/* crypto device used for enqueue/dequeue */
	uint16_t qp_id;		/* queue pair on that device */
	uint8_t lcore_id;	/* lcore running the test (set by the runner) */

	struct rte_mempool *pkt_mbuf_pool_in;	/* source mbuf pool */
	struct rte_mempool *pkt_mbuf_pool_out;	/* destination mbuf pool */
	struct rte_mbuf **mbufs_in;	/* pre-allocated source mbufs */
	struct rte_mbuf **mbufs_out;	/* pre-allocated destination mbufs */

	struct rte_mempool *crypto_op_pool;	/* pool of rte_crypto_op */

	struct rte_cryptodev_sym_session *sess;	/* symmetric crypto session */

	cperf_populate_ops_t populate_ops;	/* op-setup callback from cperf_op_fns */

	const struct cperf_options *options;		/* test configuration */
	const struct cperf_test_vector *test_vector;	/* reference data */
};
61
/*
 * Completion status of a single crypto operation.
 * NOTE(review): not referenced anywhere in this file — candidate for
 * removal; confirm no other translation unit uses it.
 */
struct cperf_op_result {
	enum rte_crypto_op_status status;
};
65
66 static void
67 cperf_verify_test_free(struct cperf_verify_ctx *ctx)
68 {
69         if (ctx) {
70                 if (ctx->sess) {
71                         rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
72                         rte_cryptodev_sym_session_free(ctx->sess);
73                 }
74
75                 cperf_free_common_memory(ctx->options,
76                                 ctx->pkt_mbuf_pool_in,
77                                 ctx->pkt_mbuf_pool_out,
78                                 ctx->mbufs_in, ctx->mbufs_out,
79                                 ctx->crypto_op_pool);
80
81                 rte_free(ctx);
82         }
83 }
84
85 void *
86 cperf_verify_test_constructor(struct rte_mempool *sess_mp,
87                 uint8_t dev_id, uint16_t qp_id,
88                 const struct cperf_options *options,
89                 const struct cperf_test_vector *test_vector,
90                 const struct cperf_op_fns *op_fns)
91 {
92         struct cperf_verify_ctx *ctx = NULL;
93
94         ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
95         if (ctx == NULL)
96                 goto err;
97
98         ctx->dev_id = dev_id;
99         ctx->qp_id = qp_id;
100
101         ctx->populate_ops = op_fns->populate_ops;
102         ctx->options = options;
103         ctx->test_vector = test_vector;
104
105         /* IV goes at the end of the cryptop operation */
106         uint16_t iv_offset = sizeof(struct rte_crypto_op) +
107                 sizeof(struct rte_crypto_sym_op);
108
109         ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
110                         iv_offset);
111         if (ctx->sess == NULL)
112                 goto err;
113
114         if (cperf_alloc_common_memory(options, test_vector, dev_id, 0,
115                         &ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
116                         &ctx->mbufs_in, &ctx->mbufs_out,
117                         &ctx->crypto_op_pool) < 0)
118                 goto err;
119
120         return ctx;
121 err:
122         cperf_verify_test_free(ctx);
123
124         return NULL;
125 }
126
127 static int
128 cperf_verify_op(struct rte_crypto_op *op,
129                 const struct cperf_options *options,
130                 const struct cperf_test_vector *vector)
131 {
132         const struct rte_mbuf *m;
133         uint32_t len;
134         uint16_t nb_segs;
135         uint8_t *data;
136         uint32_t cipher_offset, auth_offset;
137         uint8_t cipher, auth;
138         int res = 0;
139
140         if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
141                 return 1;
142
143         if (op->sym->m_dst)
144                 m = op->sym->m_dst;
145         else
146                 m = op->sym->m_src;
147         nb_segs = m->nb_segs;
148         len = 0;
149         while (m && nb_segs != 0) {
150                 len += m->data_len;
151                 m = m->next;
152                 nb_segs--;
153         }
154
155         data = rte_malloc(NULL, len, 0);
156         if (data == NULL)
157                 return 1;
158
159         if (op->sym->m_dst)
160                 m = op->sym->m_dst;
161         else
162                 m = op->sym->m_src;
163         nb_segs = m->nb_segs;
164         len = 0;
165         while (m && nb_segs != 0) {
166                 memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
167                                 m->data_len);
168                 len += m->data_len;
169                 m = m->next;
170                 nb_segs--;
171         }
172
173         switch (options->op_type) {
174         case CPERF_CIPHER_ONLY:
175                 cipher = 1;
176                 cipher_offset = 0;
177                 auth = 0;
178                 auth_offset = 0;
179                 break;
180         case CPERF_CIPHER_THEN_AUTH:
181                 cipher = 1;
182                 cipher_offset = 0;
183                 auth = 1;
184                 auth_offset = options->test_buffer_size;
185                 break;
186         case CPERF_AUTH_ONLY:
187                 cipher = 0;
188                 cipher_offset = 0;
189                 auth = 1;
190                 auth_offset = options->test_buffer_size;
191                 break;
192         case CPERF_AUTH_THEN_CIPHER:
193                 cipher = 1;
194                 cipher_offset = 0;
195                 auth = 1;
196                 auth_offset = options->test_buffer_size;
197                 break;
198         case CPERF_AEAD:
199                 cipher = 1;
200                 cipher_offset = 0;
201                 auth = 1;
202                 auth_offset = options->test_buffer_size;
203                 break;
204         default:
205                 res = 1;
206                 goto out;
207         }
208
209         if (cipher == 1) {
210                 if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
211                         res += memcmp(data + cipher_offset,
212                                         vector->ciphertext.data,
213                                         options->test_buffer_size);
214                 else
215                         res += memcmp(data + cipher_offset,
216                                         vector->plaintext.data,
217                                         options->test_buffer_size);
218         }
219
220         if (auth == 1) {
221                 if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
222                         res += memcmp(data + auth_offset,
223                                         vector->digest.data,
224                                         options->digest_sz);
225         }
226
227 out:
228         rte_free(data);
229         return !!res;
230 }
231
232 static void
233 cperf_mbuf_set(struct rte_mbuf *mbuf,
234                 const struct cperf_options *options,
235                 const struct cperf_test_vector *test_vector)
236 {
237         uint32_t segment_sz = options->segment_sz;
238         uint8_t *mbuf_data;
239         uint8_t *test_data =
240                         (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
241                                         test_vector->plaintext.data :
242                                         test_vector->ciphertext.data;
243         uint32_t remaining_bytes = options->max_buffer_size;
244
245         while (remaining_bytes) {
246                 mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);
247
248                 if (remaining_bytes <= segment_sz) {
249                         memcpy(mbuf_data, test_data, remaining_bytes);
250                         return;
251                 }
252
253                 memcpy(mbuf_data, test_data, segment_sz);
254                 remaining_bytes -= segment_sz;
255                 test_data += segment_sz;
256                 mbuf = mbuf->next;
257         }
258 }
259
/*
 * Test runner, executed per worker lcore: enqueues total_ops crypto
 * operations in bursts of at most max_burst_size, dequeues and
 * verifies each result against the test vector, then prints a
 * per-lcore summary (plain table or CSV depending on options->csv).
 *
 * Returns 0 on completion, -1 if crypto ops could not be allocated
 * from the op pool.
 */
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	/* Enqueue/dequeue statistics reported at the end of the run. */
	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	/* NOTE(review): shared by all lcores with no synchronization, so
	 * the header line may print more than once under contention.
	 */
	static int only_once;

	uint64_t i, m_idx = 0;
	uint16_t ops_unused = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	/* IV is placed right after the symmetric op — must match the
	 * layout used when the session was created in the constructor.
	 */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

		/* Full bursts until fewer than max_burst_size ops remain. */
		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		/* Ops left un-enqueued last iteration are reused, so only
		 * the difference needs to be allocated.
		 */
		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate crypto ops from pool */
		if (ops_needed != rte_crypto_op_bulk_alloc(
				ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, ops_needed)) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
				&ctx->mbufs_out[m_idx],
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset);


		/* Populate the mbuf with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;


		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		/* Advance into the pre-allocated mbuf arrays, wrapping
		 * before the next burst would overrun the pool.
		 */
		m_idx += ops_needed;
		if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
			m_idx = 0;

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
			/* free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them as
			 * the crypto operation will change the data and cause
			 * failures.
			 */
			rte_crypto_op_free(ops_processed[i]);
		}
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Sending 0 length burst to flush sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
			/* free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them as
			 * the crypto operation will change the data and cause
			 * failures.
			 */
			rte_crypto_op_free(ops_processed[i]);
		}
		ops_deqd_total += ops_deqd;
	}

	/* Print summary: aligned table, or semicolon-separated CSV. */
	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}
461
462
463
464 void
465 cperf_verify_test_destructor(void *arg)
466 {
467         struct cperf_verify_ctx *ctx = arg;
468
469         if (ctx == NULL)
470                 return;
471
472         rte_cryptodev_stop(ctx->dev_id);
473
474         cperf_verify_test_free(ctx);
475 }