/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <math.h>

#include "test_perf_common.h"
#define NB_CRYPTODEV_DESCRIPTORS 128

/* Buffer size assumed from the upstream modex vectors; comfortably larger
 * than the 128-byte modulus below.
 */
#define DATA_SIZE 512

struct modex_test_data {
	enum rte_crypto_asym_xform_type xform_type;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} base;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} exponent;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} modulus;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} reminder;
	uint32_t result_len;
};
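/*
 * Reference MODEX (modular exponentiation) vector used by the asymmetric
 * crypto producer below: the device computes base ^ exponent mod modulus.
 * The "reminder" member carries the expected remainder from the reference
 * vector; this perf path only sizes the result buffer via result_len and
 * does not verify the computed output.
 */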
static struct modex_test_data modex_test_case = {
	.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
	.base = {
		.data = {
			0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
			0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
			0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
		},
		.len = 20,
	},
	.exponent = {
		.data = {
			0x01, 0x00, 0x01
		},
		.len = 3,
	},
	.modulus = {
		.data = {
			0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
			0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
			0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
			0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
			0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
			0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
			0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
			0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
			0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
			0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
			0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
			0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
			0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
			0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
			0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
			0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
		},
		.len = 128,
	},
	.reminder = {
		.data = {
			0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00, 0x0a,
			0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5, 0xce,
			0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a, 0xa2,
			0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde, 0x0a,
			0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a, 0x3d,
			0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63, 0x6a,
			0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27, 0x6e,
			0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa, 0x72,
			0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53, 0x87,
			0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a, 0x62,
			0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63, 0x18,
			0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33, 0x4e,
			0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3, 0x03,
			0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e, 0xee,
			0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb, 0xa6,
			0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde, 0x55
		},
		.len = 128,
	},
	.result_len = 128,
};
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	printf("Packet distribution across worker cores :\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
				CLGRN" %3.2f"CLNRM"\n", i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}
static inline int
perf_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			ev.flow_id = flow_counter++ % nb_flows;
			ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_enqueue_burst(dev_id,
						port, &ev, 1) != 1) {
				if (t->done)
					break;
				rte_pause();
				m[i]->timestamp = rte_get_timer_cycles();
			}
		}
		count += BURST_SIZE;
	}

	return 0;
}
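/*
 * Note on the retry loop above: the timestamp is refreshed after every
 * failed enqueue, so the forward-latency measurement starts from the last
 * attempt rather than including the time spent waiting out back-pressure.
 */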
static inline int
perf_producer_burst(void *arg)
{
	uint32_t i;
	uint64_t timestamp;
	struct rte_event_dev_info dev_info;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint16_t enq = 0;
	uint64_t count = 0;
	struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
	uint32_t burst_size = opt->prod_enq_burst_sz;

	memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
	rte_event_dev_info_get(dev_id, &dev_info);
	if (dev_info.max_event_port_enqueue_depth < burst_size)
		burst_size = dev_info.max_event_port_enqueue_depth;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	for (i = 0; i < burst_size; i++) {
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = t->opt->sched_type_list[0];
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
		ev[i].sub_event_type = 0; /* stage 0 */
	}

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
			continue;
		timestamp = rte_get_timer_cycles();
		for (i = 0; i < burst_size; i++) {
			ev[i].flow_id = flow_counter++ % nb_flows;
			ev[i].event_ptr = m[i];
			m[i]->timestamp = timestamp;
		}
		enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
		while (enq < burst_size) {
			enq += rte_event_enqueue_burst(dev_id, port,
					ev + enq, burst_size - enq);
			if (t->done)
				break;
			rte_pause();
			timestamp = rte_get_timer_cycles();
			for (i = enq; i < burst_size; i++)
				m[i]->timestamp = timestamp;
		}
		count += burst_size;
	}
	return 0;
}
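/*
 * The partial-enqueue pattern used above, in isolation (a minimal sketch;
 * ev, burst_size, dev_id and port as in perf_producer_burst()):
 *
 *	enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
 *	while (enq < burst_size)
 *		enq += rte_event_enqueue_burst(dev_id, port,
 *				ev + enq, burst_size - enq);
 *
 * The timestamps of the not-yet-enqueued tail are refreshed on each retry
 * so that stalled events do not inflate the latency numbers.
 */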
static inline int
perf_event_timer_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_timer_arm_burst(
					adptr[flow_counter % nb_timer_adptrs],
					(struct rte_event_timer **)&m[i], 1) != 1) {
				if (t->done)
					break;
				m[i]->timestamp = rte_get_timer_cycles();
			}
			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
		}
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}
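/*
 * Worked example of the tick conversion above (illustrative values):
 * with --timer_tick_nsec=100000 and --expiry_nsec=1000000,
 * timeout_ticks = 1000000 / 100000 = 10. If the adapter reports an
 * optimal resolution of 200000 ns, the value is rescaled to
 * ceil(10 * 100000 / 200000) = 5 ticks, and a result of 0 is bumped to 1
 * so that a timer is always armed at least one tick out.
 */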
static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}
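/*
 * In the burst variant all BURST_SIZE timers share a single arm call, so
 * the arm-latency sample is taken only against the timestamp of the last
 * element (m[i - 1]); per-timer arm cost is not measured here.
 */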
static inline void
crypto_adapter_enq_op_new(struct prod_data *p)
{
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	struct evt_options *opt = t->opt;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	struct rte_mbuf *m;
	uint64_t count = 0;
	uint16_t len;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
				__func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
				p->ca.cdev_qp_id);

	len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;

			op = rte_crypto_op_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC);
			m = rte_pktmbuf_alloc(pool);
			if (m == NULL)
				continue;

			rte_pktmbuf_append(m, len);
			sym_op = op->sym;
			sym_op->m_src = m;
			sym_op->cipher.data.offset = 0;
			sym_op->cipher.data.length = len;
			rte_crypto_op_attach_sym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		} else {
			struct rte_crypto_asym_op *asym_op;
			uint8_t *result = rte_zmalloc(NULL,
					modex_test_case.result_len, 0);

			op = rte_crypto_op_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
			asym_op = op->asym;
			asym_op->modex.base.data = modex_test_case.base.data;
			asym_op->modex.base.length = modex_test_case.base.len;
			asym_op->modex.result.data = result;
			asym_op->modex.result.length = modex_test_case.result_len;
			rte_crypto_op_attach_asym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		}
		while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
				t->done == false)
			rte_pause();

		count++;
	}
}
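/*
 * OP_NEW mode: ops are submitted directly to the cryptodev queue pair;
 * the crypto adapter (or the device itself, with an internal port) turns
 * each completion into a NEW event on the configured event queue.
 */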
static inline void
crypto_adapter_enq_op_fwd(struct prod_data *p)
{
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	struct rte_event ev;
	struct rte_mbuf *m;
	uint64_t count = 0;
	uint16_t len;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
				__func__, rte_lcore_id(), port, p->queue_id,
				p->ca.cdev_id, p->ca.cdev_qp_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;

			op = rte_crypto_op_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC);
			m = rte_pktmbuf_alloc(pool);
			if (m == NULL)
				continue;

			rte_pktmbuf_append(m, len);
			sym_op = op->sym;
			sym_op->m_src = m;
			sym_op->cipher.data.offset = 0;
			sym_op->cipher.data.length = len;
			rte_crypto_op_attach_sym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		} else {
			struct rte_crypto_asym_op *asym_op;
			uint8_t *result = rte_zmalloc(NULL,
					modex_test_case.result_len, 0);

			op = rte_crypto_op_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
			asym_op = op->asym;
			asym_op->modex.base.data = modex_test_case.base.data;
			asym_op->modex.base.length = modex_test_case.base.len;
			asym_op->modex.result.data = result;
			asym_op->modex.result.length = modex_test_case.result_len;
			rte_crypto_op_attach_asym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		}
		ev.event_ptr = op;

		while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
				t->done == false)
			rte_pause();

		count++;
	}
}
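/*
 * OP_FORWARD mode: the op travels inside an event (ev.event_ptr = op)
 * through rte_event_crypto_adapter_enqueue(), so the event device
 * schedules the submission as well as the completion.
 */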
static inline int
perf_event_crypto_producer(void *arg)
{
	struct prod_data *p = arg;
	struct evt_options *opt = p->t->opt;

	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		crypto_adapter_enq_op_new(p);
	else
		crypto_adapter_enq_op_fwd(p);

	return 0;
}
static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	bool burst = evt_has_burst_mode(p->dev_id);

	/* In case of synthetic producer, launch perf_producer or
	 * perf_producer_burst depending on producer enqueue burst size.
	 */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz == 1)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz > 1) {
		if (!burst)
			evt_err("This event device does not support burst mode");
		else
			return perf_producer_burst(arg);
	} else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			!t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return perf_event_crypto_producer(arg);
	return 0;
}
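/*
 * Illustrative invocation exercising the burst producer path (option
 * names as understood for dpdk-test-eventdev; exact flags may differ by
 * version):
 *
 *	dpdk-test-eventdev -l 0-4 -- --test=perf_queue --plcores=1 \
 *		--wlcores=2-4 --prod_enq_burst_sz=32
 */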
static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
					opt->prod_type ==
					EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
					opt->prod_type ==
					EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No scheduling progress for 5 seconds, deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}
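/*
 * The main lcore above runs two timers: a 1 s sample that prints current
 * and average mpps (plus average forward latency when enabled), and a 5 s
 * watchdog that dumps the event device and declares a deadlock when the
 * processed-packet count has not moved for a whole sample period.
 */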
int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					opt->dev_id);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter");
				return ret;
			}
		}
	}

	return ret;
}
int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return -rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}
static int
perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
{
	struct evt_options *opt = t->opt;
	uint32_t cap;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
	if (ret) {
		evt_err("Failed to get crypto adapter capabilities");
		return ret;
	}

	if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
	    ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
		evt_err("crypto adapter %s mode unsupported",
			opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
		return -ENOTSUP;
	} else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
		evt_err("Storing crypto session not supported");
		return -ENOTSUP;
	}

	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		struct rte_event response_info;

		response_info.event = 0;
		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
		response_info.queue_id = p->queue_id;
		ret = rte_event_crypto_adapter_queue_pair_add(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
			&response_info);
	} else {
		ret = rte_event_crypto_adapter_queue_pair_add(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
	}

	return ret;
}
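/*
 * The symmetric test session below uses the NULL cipher, so the measured
 * cost is the event/crypto-adapter plumbing rather than real crypto work.
 */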
static struct rte_cryptodev_sym_session *
cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_cryptodev_sym_session *sess;

	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform.next = NULL;

	sess = rte_cryptodev_sym_session_create(t->ca_sess_pool);
	if (sess == NULL) {
		evt_err("Failed to create sym session");
		return NULL;
	}

	if (rte_cryptodev_sym_session_init(p->ca.cdev_id, sess, &cipher_xform,
			t->ca_sess_priv_pool)) {
		evt_err("Failed to init session");
		return NULL;
	}

	return sess;
}
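/*
 * The asymmetric session is built from the static modex_test_case vector
 * at the top of this file; creation fails (returns NULL) when the device
 * does not advertise MODEX capability.
 */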
static void *
cryptodev_asym_sess_create(struct prod_data *p, struct test_perf *t)
{
	const struct rte_cryptodev_asymmetric_xform_capability *capability;
	struct rte_cryptodev_asym_capability_idx cap_idx;
	struct rte_crypto_asym_xform xform;
	void *sess;

	xform.next = NULL;
	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
	cap_idx.type = xform.xform_type;
	capability = rte_cryptodev_asym_capability_get(p->ca.cdev_id, &cap_idx);
	if (capability == NULL) {
		evt_err("Device doesn't support MODEX. Test skipped");
		return NULL;
	}

	xform.modex.modulus.data = modex_test_case.modulus.data;
	xform.modex.modulus.length = modex_test_case.modulus.len;
	xform.modex.exponent.data = modex_test_case.exponent.data;
	xform.modex.exponent.length = modex_test_case.exponent.len;

	if (rte_cryptodev_asym_session_create(p->ca.cdev_id, &xform,
			t->ca_asym_sess_pool, &sess)) {
		evt_err("Failed to create asym session");
		return NULL;
	}

	return sess;
}
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t stride, uint8_t nb_queues,
		const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
			port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

		ret = rte_event_port_setup(opt->dev_id, port, &conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;

		ret = perf_event_rx_adapter_setup(opt, stride, conf);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->queue_id = prod * stride;
			p->t = t;
			prod++;
		}

		ret = perf_event_timer_adapter_setup(t);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		struct rte_event_port_conf conf = *port_conf;
		uint8_t cdev_id = 0;
		uint16_t qp_id = 0;

		ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
				opt->dev_id, &conf, 0);
		if (ret) {
			evt_err("Failed to create crypto adapter");
			return ret;
		}

		prod = 0;
		for (; port < perf_nb_event_ports(opt); port++) {
			union rte_event_crypto_metadata m_data;
			struct prod_data *p = &t->prod[port];
			uint32_t flow_id;

			if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
				cdev_id++;
				qp_id = 0;
			}

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->ca.cdev_id = cdev_id;
			p->ca.cdev_qp_id = qp_id;
			p->ca.crypto_sess = rte_zmalloc_socket(
					NULL, sizeof(void *) * t->nb_flows,
					RTE_CACHE_LINE_SIZE, opt->socket_id);
			p->t = t;

			m_data.request_info.cdev_id = p->ca.cdev_id;
			m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
			m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
			m_data.response_info.queue_id = p->queue_id;

			for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
				m_data.response_info.flow_id = flow_id;
				if (opt->crypto_op_type ==
						RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
					struct rte_cryptodev_sym_session *sess;

					sess = cryptodev_sym_sess_create(p, t);
					if (sess == NULL)
						return -ENOMEM;

					rte_cryptodev_session_event_mdata_set(
						cdev_id,
						sess,
						RTE_CRYPTO_OP_TYPE_SYMMETRIC,
						RTE_CRYPTO_OP_WITH_SESSION,
						&m_data, sizeof(m_data));
					p->ca.crypto_sess[flow_id] = sess;
				} else {
					void *sess;

					sess = cryptodev_asym_sess_create(p, t);
					if (sess == NULL)
						return -ENOMEM;

					rte_cryptodev_session_event_mdata_set(
						cdev_id,
						sess,
						RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
						RTE_CRYPTO_OP_WITH_SESSION,
						&m_data, sizeof(m_data));
					p->ca.crypto_sess[flow_id] = sess;
				}
			}

			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}

			ret = perf_event_crypto_adapter_setup(t, p);
			if (ret)
				return ret;

			qp_id++;
			prod++;
		}
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			struct rte_event_port_conf conf = *port_conf;
			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}
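/*
 * Resulting port layout: ports [0, nb_workers) are worker ports linked to
 * every queue; the remaining ports up to perf_nb_event_ports(opt) belong
 * to producers (synthetic, Rx adapter, timer adapter or crypto adapter)
 * and carry no queue links.
 */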
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + main when producer cores are used,
	 * else N worker + main when Rx adapter is used.
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test needs a minimum of %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlap producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
	    opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
	    opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_main_lcore())) {
			evt_err("producer lcores overlap with the main lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("at least one producer lcore is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if ((opt->nb_stages == 1 &&
			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
			opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}

	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}
void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}
static void
perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
		void *args)
{
	rte_mempool_put(args, ev.event_ptr);
}

void
perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
		uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
		uint16_t nb_deq)
{
	int i;

	if (nb_deq) {
		/* Free the events that were dequeued but not re-enqueued. */
		for (i = nb_enq; i < nb_deq; i++)
			rte_mempool_put(pool, events[i].event_ptr);

		for (i = 0; i < nb_deq; i++)
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
	}
	rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
}
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (i = 0; i < opt->nb_timer_adptrs; i++)
			rte_event_timer_adapter_stop(t->timer_adptr[i]);
	}
	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}
static inline void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
		void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}
#define NB_RX_DESC	128
#define NB_TX_DESC	512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
			.split_hdr_size = 0,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s",
					i, strerror(-ret));
			return ret;
		}

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
				dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support,"
					" requested:%#"PRIx64" configured:%#"PRIx64,
					i,
					port_conf.rx_adv_conf.rss_conf.rss_hf,
					local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
				rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
					i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}
void
perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_event_eth_rx_adapter_queue_del(i, i, -1);
			rte_eth_dev_rx_queue_stop(i, 0);
		}
	}
}
void
perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_tx_adapter_stop(i);
			rte_event_eth_tx_adapter_queue_del(i, i, -1);
			rte_eth_dev_tx_queue_stop(i, 0);
			rte_eth_dev_stop(i);
		}
	}
}
int
perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
	struct test_perf *t = evt_test_priv(test);
	unsigned int max_session_size;
	uint32_t nb_sessions;
	int ret;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return 0;

	cdev_count = rte_cryptodev_count();
	if (cdev_count == 0) {
		evt_err("No crypto devices available");
		return -ENODEV;
	}

	t->ca_op_pool = rte_crypto_op_pool_create(
			"crypto_op_pool", opt->crypto_op_type, opt->pool_sz,
			128, sizeof(union rte_event_crypto_metadata),
			rte_socket_id());
	if (t->ca_op_pool == NULL) {
		evt_err("Failed to create crypto op pool");
		return -ENOMEM;
	}

	nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
	t->ca_asym_sess_pool = rte_cryptodev_asym_session_pool_create(
			"ca_asym_sess_pool", nb_sessions, 0,
			sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_asym_sess_pool == NULL) {
		evt_err("Failed to create asym session pool");
		ret = -ENOMEM;
		goto err;
	}

	t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
			"ca_sess_pool", nb_sessions, 0, 0,
			sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_sess_pool == NULL) {
		evt_err("Failed to create sym session pool");
		ret = -ENOMEM;
		goto err;
	}

	max_session_size = 0;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		unsigned int session_size;

		session_size =
			rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (session_size > max_session_size)
			max_session_size = session_size;
	}

	max_session_size += sizeof(union rte_event_crypto_metadata);
	t->ca_sess_priv_pool = rte_mempool_create(
			"ca_sess_priv_pool", nb_sessions, max_session_size, 0, 0, NULL,
			NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (t->ca_sess_priv_pool == NULL) {
		evt_err("failed to create sym session private pool");
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number of
	 * available logical cores and crypto devices. For instance, if there
	 * are 4 cores and 2 crypto devices, 2 queue pairs will be set up per
	 * device.
	 */
	nb_plcores = evt_nr_active_lcores(opt->plcores);
	nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
			nb_plcores / cdev_count;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		struct rte_cryptodev_qp_conf qp_conf;
		struct rte_cryptodev_config conf;
		struct rte_cryptodev_info info;
		int qp_id;

		rte_cryptodev_info_get(cdev_id, &info);
		if (nb_qps > info.max_nb_queue_pairs) {
			evt_err("Not enough queue pairs per cryptodev (%u)",
					nb_qps);
			ret = -EINVAL;
			goto err;
		}

		conf.nb_queue_pairs = nb_qps;
		conf.socket_id = SOCKET_ID_ANY;
		conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret) {
			evt_err("Failed to configure cryptodev (%u)", cdev_id);
			goto err;
		}

		qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
		qp_conf.mp_session = t->ca_sess_pool;
		qp_conf.mp_session_private = t->ca_sess_priv_pool;

		for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
			ret = rte_cryptodev_queue_pair_setup(
					cdev_id, qp_id, &qp_conf,
					rte_cryptodev_socket_id(cdev_id));
			if (ret) {
				evt_err("Failed to setup queue pairs on cryptodev %u",
						cdev_id);
				goto err;
			}
		}
	}

	return 0;
err:
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
		rte_cryptodev_close(cdev_id);

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_sess_priv_pool);
	rte_mempool_free(t->ca_asym_sess_pool);

	return ret;
}
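/*
 * Queue-pair math from above, worked through: with 5 producer lcores and
 * 2 cryptodevs, nb_qps = (5 % 2) ? 5 / 2 + 1 : 5 / 2 = 3, i.e. three
 * queue pairs per device, six in total for five producers (one spare).
 */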
void
perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_id, cdev_count = rte_cryptodev_count();
	struct test_perf *t = evt_test_priv(test);
	uint16_t port;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return;

	for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
		struct rte_cryptodev_sym_session *sess;
		struct prod_data *p = &t->prod[port];
		uint32_t flow_id;

		for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
			sess = p->ca.crypto_sess[flow_id];
			cdev_id = p->ca.cdev_id;
			rte_cryptodev_sym_session_clear(cdev_id, sess);
			rte_cryptodev_sym_session_free(sess);
		}

		rte_event_crypto_adapter_queue_pair_del(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
	}

	rte_event_crypto_adapter_free(TEST_PERF_CA_ID);

	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		rte_cryptodev_stop(cdev_id);
		rte_cryptodev_close(cdev_id);
	}

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_sess_priv_pool);
	rte_mempool_free(t->ca_asym_sess_pool);
}
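/*
 * Pool element type depends on the producer: synthetic and timer
 * producers circulate struct perf_elt objects, while the Rx adapter and
 * crypto producers need real mbufs, hence the split below.
 */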
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt), /* element size */
				512, /* cache size */
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				512, /* cache size */
				0, /* private data size */
				RTE_MBUF_DEFAULT_BUF_SIZE, /* data room size */
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));

	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}