b8814b6bcb5be0fb2edafcd667b57486330c54c8
[dpdk.git] / app / test-crypto-perf / cperf_test_verify.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <rte_malloc.h>
34 #include <rte_cycles.h>
35 #include <rte_crypto.h>
36 #include <rte_cryptodev.h>
37
38 #include "cperf_test_verify.h"
39 #include "cperf_ops.h"
40 #include "cperf_test_common.h"
41
/*
 * Per-(device, queue pair) state for one verify-test run.
 * Built by cperf_verify_test_constructor() and released by
 * cperf_verify_test_free().
 */
struct cperf_verify_ctx {
	uint8_t dev_id;		/* crypto device driven by this context */
	uint16_t qp_id;		/* queue pair on that device */
	uint8_t lcore_id;	/* set by the runner from rte_lcore_id() */

	/* Pools/arrays created by cperf_alloc_common_memory() */
	struct rte_mempool *pkt_mbuf_pool_in;
	struct rte_mempool *pkt_mbuf_pool_out;
	struct rte_mbuf **mbufs_in;
	struct rte_mbuf **mbufs_out;

	struct rte_mempool *crypto_op_pool;	/* pool of rte_crypto_op */

	/* Symmetric session shared by all operations of the run */
	struct rte_cryptodev_sym_session *sess;

	/* Op-setup callback taken from cperf_op_fns at construction */
	cperf_populate_ops_t populate_ops;

	const struct cperf_options *options;		/* not owned */
	const struct cperf_test_vector *test_vector;	/* not owned */
};
61
/*
 * Status of a single crypto operation.
 * NOTE(review): not referenced anywhere in this translation unit as far
 * as visible — candidate for removal if no other file uses it.
 */
struct cperf_op_result {
	enum rte_crypto_op_status status;
};
65
66 static void
67 cperf_verify_test_free(struct cperf_verify_ctx *ctx)
68 {
69         if (ctx) {
70                 if (ctx->sess) {
71                         rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
72                         rte_cryptodev_sym_session_free(ctx->sess);
73                 }
74
75                 cperf_free_common_memory(ctx->options,
76                                 ctx->pkt_mbuf_pool_in,
77                                 ctx->pkt_mbuf_pool_out,
78                                 ctx->mbufs_in, ctx->mbufs_out,
79                                 ctx->crypto_op_pool);
80
81                 rte_free(ctx);
82         }
83 }
84
85 void *
86 cperf_verify_test_constructor(struct rte_mempool *sess_mp,
87                 uint8_t dev_id, uint16_t qp_id,
88                 const struct cperf_options *options,
89                 const struct cperf_test_vector *test_vector,
90                 const struct cperf_op_fns *op_fns)
91 {
92         struct cperf_verify_ctx *ctx = NULL;
93
94         ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
95         if (ctx == NULL)
96                 goto err;
97
98         ctx->dev_id = dev_id;
99         ctx->qp_id = qp_id;
100
101         ctx->populate_ops = op_fns->populate_ops;
102         ctx->options = options;
103         ctx->test_vector = test_vector;
104
105         /* IV goes at the end of the cryptop operation */
106         uint16_t iv_offset = sizeof(struct rte_crypto_op) +
107                 sizeof(struct rte_crypto_sym_op);
108
109         ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
110                         iv_offset);
111         if (ctx->sess == NULL)
112                 goto err;
113
114         if (cperf_alloc_common_memory(options, test_vector, dev_id, 0,
115                         &ctx->pkt_mbuf_pool_in, &ctx->pkt_mbuf_pool_out,
116                         &ctx->mbufs_in, &ctx->mbufs_out,
117                         &ctx->crypto_op_pool) < 0)
118                 goto err;
119
120         return ctx;
121 err:
122         cperf_verify_test_free(ctx);
123
124         return NULL;
125 }
126
127 static int
128 cperf_verify_op(struct rte_crypto_op *op,
129                 const struct cperf_options *options,
130                 const struct cperf_test_vector *vector)
131 {
132         const struct rte_mbuf *m;
133         uint32_t len;
134         uint16_t nb_segs;
135         uint8_t *data;
136         uint32_t cipher_offset, auth_offset;
137         uint8_t cipher, auth;
138         int res = 0;
139
140         if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
141                 return 1;
142
143         if (op->sym->m_dst)
144                 m = op->sym->m_dst;
145         else
146                 m = op->sym->m_src;
147         nb_segs = m->nb_segs;
148         len = 0;
149         while (m && nb_segs != 0) {
150                 len += m->data_len;
151                 m = m->next;
152                 nb_segs--;
153         }
154
155         data = rte_malloc(NULL, len, 0);
156         if (data == NULL)
157                 return 1;
158
159         if (op->sym->m_dst)
160                 m = op->sym->m_dst;
161         else
162                 m = op->sym->m_src;
163         nb_segs = m->nb_segs;
164         len = 0;
165         while (m && nb_segs != 0) {
166                 memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
167                                 m->data_len);
168                 len += m->data_len;
169                 m = m->next;
170                 nb_segs--;
171         }
172
173         switch (options->op_type) {
174         case CPERF_CIPHER_ONLY:
175                 cipher = 1;
176                 cipher_offset = 0;
177                 auth = 0;
178                 auth_offset = 0;
179                 break;
180         case CPERF_CIPHER_THEN_AUTH:
181                 cipher = 1;
182                 cipher_offset = 0;
183                 auth = 1;
184                 auth_offset = options->test_buffer_size;
185                 break;
186         case CPERF_AUTH_ONLY:
187                 cipher = 0;
188                 cipher_offset = 0;
189                 auth = 1;
190                 auth_offset = options->test_buffer_size;
191                 break;
192         case CPERF_AUTH_THEN_CIPHER:
193                 cipher = 1;
194                 cipher_offset = 0;
195                 auth = 1;
196                 auth_offset = options->test_buffer_size;
197                 break;
198         case CPERF_AEAD:
199                 cipher = 1;
200                 cipher_offset = vector->aad.length;
201                 auth = 1;
202                 auth_offset = vector->aad.length + options->test_buffer_size;
203                 break;
204         default:
205                 res = 1;
206                 goto out;
207         }
208
209         if (cipher == 1) {
210                 if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
211                         res += memcmp(data + cipher_offset,
212                                         vector->ciphertext.data,
213                                         options->test_buffer_size);
214                 else
215                         res += memcmp(data + cipher_offset,
216                                         vector->plaintext.data,
217                                         options->test_buffer_size);
218         }
219
220         if (auth == 1) {
221                 if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
222                         res += memcmp(data + auth_offset,
223                                         vector->digest.data,
224                                         options->digest_sz);
225         }
226
227 out:
228         rte_free(data);
229         return !!res;
230 }
231
/*
 * Main per-lcore loop of the verify test: enqueue bursts of crypto ops
 * until total_ops have been submitted, dequeueing and verifying results
 * as they complete, then drain the device and print per-lcore counters.
 *
 * Returns 0 on completion, -1 if crypto ops could not be allocated.
 */
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	/*
	 * Shared across all lcores so the header line is printed once.
	 * NOTE(review): plain static int with no synchronization — two
	 * lcores may race and both print the header; confirm whether
	 * that is acceptable here.
	 */
	static int only_once;

	uint64_t i, m_idx = 0;
	uint16_t ops_unused = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segments_nb > 1) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	/* IV is stored right after the symmetric op in each crypto op */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

		/* Last burst may be smaller than max_burst_size */
		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		/*
		 * Ops left over from the previous iteration (enqueued
		 * short) are reused; only allocate the difference.
		 */
		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate crypto ops from pool */
		if (ops_needed != rte_crypto_op_bulk_alloc(
				ctx->crypto_op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				ops, ops_needed)) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
				&ctx->mbufs_out[m_idx],
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;


		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		/* Advance the mbuf window, wrapping around the pool */
		m_idx += ops_needed;
		if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
			m_idx = 0;

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
			/* free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them as
			 * the crypto operation will change the data and cause
			 * failures.
			 */
			rte_crypto_op_free(ops_processed[i]);
		}
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Sending 0 length burst to flush sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
			/* free crypto ops so they can be reused. We don't free
			 * the mbufs here as we don't want to reuse them as
			 * the crypto operation will change the data and cause
			 * failures.
			 */
			rte_crypto_op_free(ops_processed[i]);
		}
		ops_deqd_total += ops_deqd;
	}

	/* Report counters: human-readable table or CSV row */
	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}
426
427
428
429 void
430 cperf_verify_test_destructor(void *arg)
431 {
432         struct cperf_verify_ctx *ctx = arg;
433
434         if (ctx == NULL)
435                 return;
436
437         rte_cryptodev_stop(ctx->dev_id);
438
439         cperf_verify_test_free(ctx);
440 }