+ count += burst_size;
+ }
+ return 0;
+}
+
+static inline int
+perf_event_timer_producer(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ uint64_t arm_latency = 0;
+ const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_timers = opt->nb_timers;
+ struct rte_mempool *pool = t->pool;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
+ uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+ memset(&tim, 0, sizeof(struct rte_event_timer));
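+ /* Rescale the expiry into the adapter's optimal tick resolution (rounding up); always arm for at least one tick. */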
+ timeout_ticks =
+ opt->optm_timer_tick_nsec
+ ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
+ opt->optm_timer_tick_nsec)
+ : timeout_ticks;
+ timeout_ticks += timeout_ticks ? 0 : 1;
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
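+ /* Take a burst of elements from the mempool, stamp each one, and arm the timers one at a time. */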
+ while (count < nb_timers && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
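+ /* m[] carries one extra NULL slot so this prefetch stays in bounds on the last iteration. */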
+ rte_prefetch0(m[i + 1]);
+ m[i]->tim = tim;
+ m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m[i]->tim.ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
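+ /* Busy-retry the arm; the timestamp is refreshed on each failed attempt so only the successful arm is measured. */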
+ while (rte_event_timer_arm_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)&m[i], 1) != 1) {
+ if (t->done)
+ break;
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
+ }
+ count += BURST_SIZE;
+ }
+ fflush(stdout);
+ rte_delay_ms(1000);
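+ /* Report the mean arm latency, converting average cycles to microseconds. */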
+ printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+ __func__, rte_lcore_id(),
+ count ? (float)(arm_latency / count) /
+ (rte_get_timer_hz() / 1000000) : 0);
+ return 0;
+}
+
+static inline int
+perf_event_timer_producer_burst(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ uint64_t arm_latency = 0;
+ const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_timers = opt->nb_timers;
+ struct rte_mempool *pool = t->pool;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
+ uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+ memset(&tim, 0, sizeof(struct rte_event_timer));
+ timeout_ticks =
+ opt->optm_timer_tick_nsec
+ ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
+ opt->optm_timer_tick_nsec)
+ : timeout_ticks;
+ timeout_ticks += timeout_ticks ? 0 : 1;
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
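+ /* Burst variant: stamp a full burst first, then arm all BURST_SIZE timers with a common timeout in a single call. */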
+ while (count < nb_timers && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
+ rte_prefetch0(m[i + 1]);
+ m[i]->tim = tim;
+ m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m[i]->tim.ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ rte_event_timer_arm_tmo_tick_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)m,
+ tim.timeout_ticks,
+ BURST_SIZE);
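+ /* i == BURST_SIZE after the loop, so m[i - 1] is the last element stamped above. */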
+ arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
+ count += BURST_SIZE;
+ }
+ fflush(stdout);
+ rte_delay_ms(1000);
+ printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+ __func__, rte_lcore_id(),
+ count ? (float)(arm_latency / count) /
+ (rte_get_timer_hz() / 1000000) : 0);
+ return 0;
+}
+
+static inline void
+crypto_adapter_enq_op_new(struct prod_data *p)
+{
+ struct rte_cryptodev_sym_session **crypto_sess = p->ca.crypto_sess;
+ struct test_perf *t = p->t;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_pkts = t->nb_pkts;
+ struct rte_mempool *pool = t->pool;
+ struct rte_crypto_sym_op *sym_op;
+ struct evt_options *opt = t->opt;
+ uint16_t qp_id = p->ca.cdev_qp_id;
+ uint8_t cdev_id = p->ca.cdev_id;
+ uint32_t flow_counter = 0;
+ struct rte_crypto_op *op;
+ struct rte_mbuf *m;
+ uint64_t count = 0;
+ uint16_t len;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
+ __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
+ p->ca.cdev_qp_id);
+
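+ /* Default the mbuf payload length to the minimum Ethernet frame size. */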
+ len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
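+ /* OP_NEW mode: build a symmetric op per mbuf, attach the per-flow session and enqueue directly to the cryptodev queue pair; the adapter picks up the completions. */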
+ while (count < nb_pkts && t->done == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ rte_pktmbuf_append(m, len);
+ op = rte_crypto_op_alloc(t->ca_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (op == NULL) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+ sym_op = op->sym;
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = len;
+ rte_crypto_op_attach_sym_session(
+ op, crypto_sess[flow_counter++ % nb_flows]);
+
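+ /* Busy-wait until the cryptodev accepts the op or the test stops. */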
+ while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
+ t->done == false)
+ rte_pause();
+
+ count++;
+ }
+}
+
+static inline void
+crypto_adapter_enq_op_fwd(struct prod_data *p)
+{
+ struct rte_cryptodev_sym_session **crypto_sess = p->ca.crypto_sess;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct test_perf *t = p->t;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_pkts = t->nb_pkts;
+ struct rte_mempool *pool = t->pool;
+ struct evt_options *opt = t->opt;
+ struct rte_crypto_sym_op *sym_op;
+ uint32_t flow_counter = 0;
+ struct rte_crypto_op *op;
+ struct rte_event ev;
+ struct rte_mbuf *m;
+ uint64_t count = 0;
+ uint16_t len;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
+ __func__, rte_lcore_id(), port, p->queue_id,
+ p->ca.cdev_id, p->ca.cdev_qp_id);
+
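+ /* OP_FORWARD mode: each op is carried as the payload of a CPU event and enqueued to the event device, which hands it to the crypto adapter for submission to the cryptodev. */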
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
+ while (count < nb_pkts && t->done == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ rte_pktmbuf_append(m, len);
+ op = rte_crypto_op_alloc(t->ca_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (op == NULL) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+ sym_op = op->sym;
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = len;
+ rte_crypto_op_attach_sym_session(
+ op, crypto_sess[flow_counter++ % nb_flows]);
+ ev.event_ptr = op;
+
+ while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
+ t->done == false)
+ rte_pause();
+