1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
7 #include <rte_malloc.h>
10 #include <rte_cycles.h>
11 #include <rte_ipsec.h>
12 #include <rte_random.h>
15 #include "test_cryptodev.h"
17 #define RING_SIZE 4096
/* One row of the test matrix driving test_libipsec_perf(). Additional
 * fields (esn, flags — see the test_cfg[] initializers) are on lines not
 * visible in this chunk. */
22 struct ipsec_test_cfg {
/* anti-replay window size in packets; 0 disables the replay check */
23 uint32_t replay_win_sz;
/* transform flavour: AEAD (AES-GCM) or CIPHER (AES-CBC + SHA1-HMAC) */
26 enum rte_crypto_sym_xform_type type;
/* Packet and crypto-op mempools shared by the whole test; created in
 * testsuite_setup(), released in testsuite_teardown(). */
29 struct rte_mempool *mbuf_pool, *cop_pool;
/* Cycle and packet counters accumulated per SA by packet_prepare() and
 * packet_process(); consumed by print_metrics(). */
31 struct stats_counter {
32 uint64_t nb_prepare_call;
33 uint64_t nb_prepare_pkt;
34 uint64_t nb_process_call;
35 uint64_t nb_process_pkt;
/* rdtsc ticks spent strictly inside the timed prepare/process calls */
36 uint64_t prepare_ticks_elapsed;
37 uint64_t process_ticks_elapsed;
/* Interior of struct ipsec_sa (the struct header is outside this view):
 * everything needed to drive one SA through the perf loops. */
41 struct rte_ipsec_session ss[2];
42 struct rte_ipsec_sa_prm sa_prm;
43 struct rte_security_ipsec_xform ipsec_xform;
/* individual transforms; crypto_xforms points at the head of the chain
 * assembled by fill_ipsec_param() */
44 struct rte_crypto_sym_xform cipher_xform;
45 struct rte_crypto_sym_xform auth_xform;
46 struct rte_crypto_sym_xform aead_xform;
47 struct rte_crypto_sym_xform *crypto_xforms;
/* per-burst scratch array of crypto ops used by packet_prepare() */
48 struct rte_crypto_op *cop[BURST_SIZE];
49 enum rte_crypto_sym_xform_type type;
/* perf counters plus settings copied from the test configuration */
50 struct stats_counter cnt;
51 uint32_t replay_win_sz;
/* Test matrix: entries appear to be {replay_win_sz, esn, flags, type} —
 * the exact field order depends on struct members not visible in this
 * chunk; TODO confirm against the full struct ipsec_test_cfg. */
55 static const struct ipsec_test_cfg test_cfg[] = {
56 {0, 0, 0, RTE_CRYPTO_SYM_XFORM_AEAD},
57 {0, 0, 0, RTE_CRYPTO_SYM_XFORM_CIPHER},
58 {128, 1, 0, RTE_CRYPTO_SYM_XFORM_AEAD},
59 {128, 1, 0, RTE_CRYPTO_SYM_XFORM_CIPHER},
/* Template outer IPv4 header used for tunnel-mode ESP encapsulation
 * (referenced by fill_ipsec_param() via prm->tun.hdr). */
63 static struct rte_ipv4_hdr ipv4_outer = {
/* version nibble (high 4 bits) | IHL in 32-bit words (low 4 bits) */
64 .version_ihl = IPVERSION << 4 |
65 sizeof(ipv4_outer) / RTE_IPV4_IHL_MULTIPLIER,
66 .time_to_live = IPDEFTTL,
67 .next_proto_id = IPPROTO_ESP,
/* arbitrary private-range tunnel endpoints; test-only values */
68 .src_addr = RTE_IPV4(192, 168, 1, 100),
69 .dst_addr = RTE_IPV4(192, 168, 2, 100),
/* Staging rings connecting the prepare and process stages for each
 * traffic direction; created in testsuite_setup(). */
72 static struct rte_ring *ring_inb_prepare;
73 static struct rte_ring *ring_inb_process;
74 static struct rte_ring *ring_outb_prepare;
75 static struct rte_ring *ring_outb_process;
/* Descriptor types mapping a keyword name to a DPDK algorithm id; key and
 * digest length fields are on lines not visible in this chunk. */
77 struct supported_cipher_algo {
79 enum rte_crypto_cipher_algorithm algo;
85 struct supported_auth_algo {
87 enum rte_crypto_auth_algorithm algo;
93 struct supported_aead_algo {
95 enum rte_crypto_aead_algorithm algo;
/* Single-entry algorithm tables; fill_ipsec_param() always dereferences
 * the first (and only) entry of each. */
103 const struct supported_cipher_algo cipher_algo[] = {
105 .keyword = "aes-128-cbc",
106 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
113 const struct supported_auth_algo auth_algo[] = {
115 .keyword = "sha1-hmac",
116 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
122 const struct supported_aead_algo aead_algo[] = {
124 .keyword = "aes-128-gcm",
125 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* Allocate one mbuf from mpool; the payload fill and return happen on
 * lines not visible in this chunk — presumably it builds a plain IPv4
 * test packet; TODO confirm against the full source. */
134 static struct rte_mbuf *generate_mbuf_data(struct rte_mempool *mpool)
136 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mpool);
/*
 * Populate sa->sa_prm (struct rte_ipsec_sa_prm) and the symmetric crypto
 * transform chain from the fields already stored in *sa by
 * fill_ipsec_sa_in()/fill_ipsec_sa_out().
 */
147 fill_ipsec_param(struct ipsec_sa *sa)
149 struct rte_ipsec_sa_prm *prm = &sa->sa_prm;
151 memset(prm, 0, sizeof(*prm));
153 prm->flags = sa->sa_flags;
155 /* setup ipsec xform */
156 prm->ipsec_xform = sa->ipsec_xform;
/* random salt is fine here: perf test, no interoperability required */
157 prm->ipsec_xform.salt = (uint32_t)rte_rand();
158 prm->ipsec_xform.replay_win_sz = sa->replay_win_sz;
160 /* setup tunnel related fields */
161 prm->tun.hdr_len = sizeof(ipv4_outer);
162 prm->tun.next_proto = IPPROTO_IPIP;
163 prm->tun.hdr = &ipv4_outer;
/* AEAD path: a single AES-GCM transform from aead_algo[0] */
165 if (sa->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
166 sa->aead_xform.type = sa->type;
167 sa->aead_xform.aead.algo = aead_algo->algo;
168 sa->aead_xform.next = NULL;
169 sa->aead_xform.aead.digest_length = aead_algo->digest_len;
170 sa->aead_xform.aead.iv.offset = IV_OFFSET;
/* 12-byte IV: the usual GCM nonce length for ESP */
171 sa->aead_xform.aead.iv.length = 12;
173 if (sa->ipsec_xform.direction ==
174 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
175 sa->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
177 sa->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
180 sa->crypto_xforms = &sa->aead_xform;
/* CIPHER+AUTH path: AES-CBC chained with SHA1-HMAC */
182 sa->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
183 sa->cipher_xform.cipher.algo = cipher_algo->algo;
184 sa->cipher_xform.cipher.iv.offset = IV_OFFSET;
/* NOTE(review): a 12-byte IV for AES-CBC looks short (block size is 16)
 * — confirm this is intended in the full source */
185 sa->cipher_xform.cipher.iv.length = 12;
186 sa->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
187 sa->auth_xform.auth.algo = auth_algo->algo;
188 sa->auth_xform.auth.digest_length = auth_algo->digest_len;
/* inbound: auth (verify) heads the chain, cipher (decrypt) follows */
191 if (sa->ipsec_xform.direction ==
192 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
193 sa->cipher_xform.cipher.op =
194 RTE_CRYPTO_CIPHER_OP_DECRYPT;
195 sa->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
196 sa->cipher_xform.next = NULL;
197 sa->auth_xform.next = &sa->cipher_xform;
198 sa->crypto_xforms = &sa->auth_xform;
/* outbound: cipher (encrypt) heads the chain, auth (generate) follows */
200 sa->cipher_xform.cipher.op =
201 RTE_CRYPTO_CIPHER_OP_ENCRYPT;
202 sa->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
203 sa->auth_xform.next = NULL;
204 sa->cipher_xform.next = &sa->auth_xform;
205 sa->crypto_xforms = &sa->cipher_xform;
/* hand the assembled transform chain to the SA parameter block */
209 prm->crypto_xform = sa->crypto_xforms;
/*
 * Build and initialize an rte_ipsec_sa and its session (ss[0]) for the
 * given SA descriptor. A static placeholder session object is used for
 * crypto.ses — no real cryptodev is configured in the visible code.
 */
215 create_sa(enum rte_security_session_action_type action_type,
218 static struct rte_cryptodev_sym_session dummy_ses;
222 memset(&sa->ss[0], 0, sizeof(sa->ss[0]));
224 rc = fill_ipsec_param(sa);
226 printf("failed to fill ipsec param\n");
/* size the SA object from the prepared parameters, then allocate it */
230 sz = rte_ipsec_sa_size(&sa->sa_prm);
231 TEST_ASSERT(sz > 0, "rte_ipsec_sa_size() failed\n");
233 sa->ss[0].sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
234 TEST_ASSERT_NOT_NULL(sa->ss[0].sa,
235 "failed to allocate memory for rte_ipsec_sa\n");
237 sa->ss[0].type = action_type;
238 sa->ss[0].crypto.ses = &dummy_ses;
/* rte_ipsec_sa_init() returns bytes consumed on success; normalize the
 * result to 0 on success, -EINVAL otherwise */
240 rc = rte_ipsec_sa_init(sa->ss[0].sa, &sa->sa_prm, sz);
241 rc = (rc > 0 && (uint32_t)rc <= sz) ? 0 : -EINVAL;
244 rc = rte_ipsec_session_prepare(&sa->ss[0]);
/*
 * Allocate one crypto op per packet, time a single call to
 * rte_ipsec_pkt_crypto_prepare() over the burst, accumulate the stats
 * into sa->cnt, then free the ops (no cryptodev enqueue happens in the
 * visible path — only the prepare cost is being measured).
 */
252 packet_prepare(struct rte_mbuf **buf, struct ipsec_sa *sa,
258 for (i = 0; i < num_pkts; i++) {
260 sa->cop[i] = rte_crypto_op_alloc(cop_pool,
261 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
263 if (sa->cop[i] == NULL) {
266 "Failed to allocate symmetric crypto op\n");
/* timed window: only the prepare call itself */
272 time_stamp = rte_rdtsc_precise();
274 k = rte_ipsec_pkt_crypto_prepare(&sa->ss[0], buf,
277 time_stamp = rte_rdtsc_precise() - time_stamp;
280 RTE_LOG(ERR, USER1, "rte_ipsec_pkt_crypto_prepare fail\n");
/* accumulate per-SA stats consumed later by print_metrics() */
284 sa->cnt.prepare_ticks_elapsed += time_stamp;
285 sa->cnt.nb_prepare_call++;
286 sa->cnt.nb_prepare_pkt += k;
288 for (i = 0; i < num_pkts; i++)
289 rte_crypto_op_free(sa->cop[i]);
/*
 * Time one rte_ipsec_pkt_process() call over the burst and accumulate
 * the cycle/packet counters into sa->cnt (mirror of packet_prepare()).
 */
295 packet_process(struct rte_mbuf **buf, struct ipsec_sa *sa,
301 time_stamp = rte_rdtsc_precise();
303 k = rte_ipsec_pkt_process(&sa->ss[0], buf, num_pkts);
305 time_stamp = rte_rdtsc_precise() - time_stamp;
308 RTE_LOG(ERR, USER1, "rte_ipsec_pkt_process fail\n");
312 sa->cnt.process_ticks_elapsed += time_stamp;
313 sa->cnt.nb_process_call++;
314 sa->cnt.nb_process_pkt += k;
/*
 * Pump bursts of mbufs through the prepare stage and then the process
 * stage for one SA, moving them between the given rings. The exact
 * wiring of the second loop's source ring and of the third 'ring'
 * parameter is not fully visible in this chunk — TODO confirm against
 * the full source.
 */
320 create_traffic(struct ipsec_sa *sa, struct rte_ring *deq_ring,
321 struct rte_ring *enq_ring, struct rte_ring *ring)
323 struct rte_mbuf *mbuf[BURST_SIZE];
324 uint16_t num_pkts, n;
/* stage 1: drain deq_ring, prepare each burst, push to enq_ring */
326 while (rte_ring_empty(deq_ring) == 0) {
328 num_pkts = rte_ring_sc_dequeue_burst(deq_ring, (void **)mbuf,
329 RTE_DIM(mbuf), NULL);
334 n = packet_prepare(mbuf, sa, num_pkts);
338 num_pkts = rte_ring_sp_enqueue_burst(enq_ring, (void **)mbuf,
/* stage 2: run the process step over the staged bursts */
347 while (rte_ring_empty(deq_ring) == 0) {
349 num_pkts = rte_ring_sc_dequeue_burst(deq_ring, (void **)mbuf,
350 RTE_DIM(mbuf), NULL);
354 n = packet_process(mbuf, sa, num_pkts);
358 num_pkts = rte_ring_sp_enqueue_burst(enq_ring, (void **)mbuf,
368 fill_ipsec_sa_out(const struct ipsec_test_cfg *test_cfg,
371 sa->ipsec_xform.spi = DEFAULT_SPI;
372 sa->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
373 sa->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
374 sa->ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
375 sa->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
376 sa->ipsec_xform.options.esn = test_cfg->esn;
377 sa->type = test_cfg->type;
378 sa->replay_win_sz = test_cfg->replay_win_sz;
379 sa->sa_flags = test_cfg->flags;
380 sa->cnt.nb_prepare_call = 0;
381 sa->cnt.nb_prepare_pkt = 0;
382 sa->cnt.nb_process_call = 0;
383 sa->cnt.nb_process_pkt = 0;
384 sa->cnt.process_ticks_elapsed = 0;
385 sa->cnt.prepare_ticks_elapsed = 0;
390 fill_ipsec_sa_in(const struct ipsec_test_cfg *test_cfg,
393 sa->ipsec_xform.spi = DEFAULT_SPI;
394 sa->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
395 sa->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
396 sa->ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
397 sa->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
398 sa->ipsec_xform.options.esn = test_cfg->esn;
399 sa->type = test_cfg->type;
400 sa->replay_win_sz = test_cfg->replay_win_sz;
401 sa->sa_flags = test_cfg->flags;
402 sa->cnt.nb_prepare_call = 0;
403 sa->cnt.nb_prepare_pkt = 0;
404 sa->cnt.nb_process_call = 0;
405 sa->cnt.nb_process_pkt = 0;
406 sa->cnt.process_ticks_elapsed = 0;
407 sa->cnt.prepare_ticks_elapsed = 0;
411 init_sa_session(const struct ipsec_test_cfg *test_cfg,
412 struct ipsec_sa *sa_out, struct ipsec_sa *sa_in)
417 fill_ipsec_sa_in(test_cfg, sa_in);
418 fill_ipsec_sa_out(test_cfg, sa_out);
420 rc = create_sa(RTE_SECURITY_ACTION_TYPE_NONE, sa_out);
422 RTE_LOG(ERR, USER1, "out bound create_sa failed, cfg\n");
426 rc = create_sa(RTE_SECURITY_ACTION_TYPE_NONE, sa_in);
428 RTE_LOG(ERR, USER1, "out bound create_sa failed, cfg\n");
/*
 * Build the shared fixtures: an mbuf pool, a crypto-op pool and the four
 * staging rings, then pre-fill ring_inb_prepare with generated mbufs.
 */
436 testsuite_setup(void)
438 struct rte_mbuf *mbuf;
441 mbuf_pool = rte_pktmbuf_pool_create("IPSEC_PERF_MBUFPOOL",
442 NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
444 if (mbuf_pool == NULL) {
445 RTE_LOG(ERR, USER1, "Can't create MBUFPOOL\n");
/* op pool reserves private space per op for a sym xform chain */
449 cop_pool = rte_crypto_op_pool_create(
450 "MBUF_CRYPTO_SYM_OP_POOL",
451 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
452 NUM_MBUFS, MBUF_CACHE_SIZE,
454 sizeof(struct rte_crypto_sym_xform) +
457 if (cop_pool == NULL) {
458 RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
462 ring_inb_prepare = rte_ring_create("ipsec_test_ring_inb_prepare",
463 RING_SIZE, SOCKET_ID_ANY, 0);
464 if (ring_inb_prepare == NULL)
467 ring_inb_process = rte_ring_create("ipsec_test_ring_inb_process",
468 RING_SIZE, SOCKET_ID_ANY, 0);
469 if (ring_inb_process == NULL)
472 ring_outb_prepare = rte_ring_create("ipsec_test_ring_outb_prepare",
473 RING_SIZE, SOCKET_ID_ANY, 0);
474 if (ring_outb_prepare == NULL)
477 ring_outb_process = rte_ring_create("ipsec_test_ring_outb_process",
478 RING_SIZE, SOCKET_ID_ANY, 0);
479 if (ring_outb_process == NULL)
/* NOTE(review): loop bound is NUM_MBUF while the pools are sized with
 * NUM_MBUFS — confirm both macros exist and that RING_SIZE (4096) can
 * hold NUM_MBUF entries */
482 for (i = 0; i < NUM_MBUF; i++) {
483 mbuf = generate_mbuf_data(mbuf_pool);
485 if (mbuf && rte_ring_sp_enqueue_bulk(ring_inb_prepare,
486 (void **)&mbuf, 1, NULL))
/*
 * Repeatedly pump traffic through the outbound SA and then feed the
 * results back through the inbound SA, for roughly ten wall-clock
 * seconds (hz * 10 timer cycles). The cycle counters accumulate inside
 * create_traffic() as a side effect.
 */
496 measure_performance(struct ipsec_sa *sa_out, struct ipsec_sa *sa_in)
498 uint64_t time_diff = 0;
500 uint64_t hz = rte_get_timer_hz();
502 begin = rte_get_timer_cycles();
/* outbound leg: consume staged plain mbufs, stage results for inbound */
505 if (create_traffic(sa_out, ring_inb_prepare, ring_inb_process,
506 ring_outb_prepare) < 0)
/* inbound leg: decrypt the outbound results, recycle mbufs back */
509 if (create_traffic(sa_in, ring_outb_prepare, ring_outb_process,
510 ring_inb_prepare) < 0)
513 time_diff = rte_get_timer_cycles() - begin;
/* stop after ~10 seconds of measurement */
515 } while (time_diff < (hz * 10));
/*
 * Print the test configuration followed by the average cycles-per-packet
 * for the prepare and process stages of both SAs.
 * NOTE(review): the divisions assume the nb_*_pkt counters are non-zero;
 * a run that moved no packets would divide by zero — confirm callers
 * guarantee traffic flowed before this is reached.
 */
521 print_metrics(const struct ipsec_test_cfg *test_cfg,
522 struct ipsec_sa *sa_out, struct ipsec_sa *sa_in)
524 printf("\nMetrics of libipsec prepare/process api:\n");
526 printf("replay window size = %u\n", test_cfg->replay_win_sz);
528 printf("replay esn is enabled\n");
530 printf("replay esn is disabled\n");
531 if (test_cfg->type == RTE_CRYPTO_SYM_XFORM_AEAD)
532 printf("AEAD algo is AES_GCM\n");
534 printf("CIPHER/AUTH algo is AES_CBC/SHA1\n");
/* averages = accumulated rdtsc ticks / packets handled */
537 printf("avg cycles for a pkt prepare in outbound is = %.2Lf\n",
538 (long double)sa_out->cnt.prepare_ticks_elapsed
539 / sa_out->cnt.nb_prepare_pkt);
540 printf("avg cycles for a pkt process in outbound is = %.2Lf\n",
541 (long double)sa_out->cnt.process_ticks_elapsed
542 / sa_out->cnt.nb_process_pkt);
543 printf("avg cycles for a pkt prepare in inbound is = %.2Lf\n",
544 (long double)sa_in->cnt.prepare_ticks_elapsed
545 / sa_in->cnt.nb_prepare_pkt);
546 printf("avg cycles for a pkt process in inbound is = %.2Lf\n",
547 (long double)sa_in->cnt.process_ticks_elapsed
548 / sa_in->cnt.nb_process_pkt);
553 testsuite_teardown(void)
555 if (mbuf_pool != NULL) {
556 RTE_LOG(DEBUG, USER1, "MBUFPOOL count %u\n",
557 rte_mempool_avail_count(mbuf_pool));
558 rte_mempool_free(mbuf_pool);
562 if (cop_pool != NULL) {
563 RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
564 rte_mempool_avail_count(cop_pool));
565 rte_mempool_free(cop_pool);
569 rte_ring_free(ring_inb_prepare);
570 rte_ring_free(ring_inb_process);
571 rte_ring_free(ring_outb_prepare);
572 rte_ring_free(ring_outb_process);
574 ring_inb_prepare = NULL;
575 ring_inb_process = NULL;
576 ring_outb_prepare = NULL;
577 ring_outb_process = NULL;
/*
 * Test entry point: set up the fixtures, then for each configuration in
 * test_cfg[] create the SA pair, measure prepare/process performance and
 * print the metrics. Tears the fixtures down on any failure and at the
 * end of the run.
 */
581 test_libipsec_perf(void)
583 struct ipsec_sa sa_out;
584 struct ipsec_sa sa_in;
588 if (testsuite_setup() < 0) {
589 testsuite_teardown();
/* one full measurement pass per test-matrix row */
593 for (i = 0; i < RTE_DIM(test_cfg) ; i++) {
595 ret = init_sa_session(&test_cfg[i], &sa_out, &sa_in);
597 testsuite_teardown();
601 if (measure_performance(&sa_out, &sa_in) < 0) {
602 testsuite_teardown();
606 print_metrics(&test_cfg[i], &sa_out, &sa_in);
609 testsuite_teardown();
/* Register with the DPDK test framework as "ipsec_perf_autotest". */
614 REGISTER_TEST_COMMAND(ipsec_perf_autotest, test_libipsec_perf);