-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
+#include <math.h>
+
#include "test_perf_common.h"
+#define NB_CRYPTODEV_DESCRIPTORS 128
+
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
RTE_SET_USED(opt);
+ int i;
+ uint64_t total = 0;
struct test_perf *t = evt_test_priv(test);
+ printf("Packet distribution across worker cores :\n");
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
+ for (i = 0; i < t->nb_workers; i++)
+ printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
+ CLGRN" %3.2f"CLNRM"\n", i,
+ t->worker[i].processed_pkts,
+ (((double)t->worker[i].processed_pkts)/total)
+ * 100);
+
return t->result;
}
+
+static inline int
+perf_producer(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ const uint32_t nb_flows = t->nb_flows;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event ev;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
+ rte_lcore_id(), dev_id, port, p->queue_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = t->opt->sched_type_list[0];
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 0; /* stage 0 */
+
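+ /* Stamp each element right before enqueue; on backpressure the
+ * stamp is refreshed after every retry, so the measured latency
+ * starts from the enqueue attempt that succeeds.
+ */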
+ while (count < nb_pkts && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
+ ev.flow_id = flow_counter++ % nb_flows;
+ ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ while (rte_event_enqueue_burst(dev_id,
+ port, &ev, 1) != 1) {
+ if (t->done)
+ break;
+ rte_pause();
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ }
+ count += BURST_SIZE;
+ }
+
+ return 0;
+}
+
+static inline int
+perf_producer_burst(void *arg)
+{
+ uint32_t i;
+ uint64_t timestamp;
+ struct rte_event_dev_info dev_info;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ const uint32_t nb_flows = t->nb_flows;
+ uint32_t flow_counter = 0;
+ uint16_t enq = 0;
+ uint64_t count = 0;
+ struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
+ struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
+ uint32_t burst_size = opt->prod_enq_burst_sz;
+
+ memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
+ rte_event_dev_info_get(dev_id, &dev_info);
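+ /* A single enqueue call must not exceed the port's maximum
+ * enqueue depth.
+ */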
+ if (dev_info.max_event_port_enqueue_depth < burst_size)
+ burst_size = dev_info.max_event_port_enqueue_depth;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
+ rte_lcore_id(), dev_id, port, p->queue_id);
+
+ for (i = 0; i < burst_size; i++) {
+ ev[i].op = RTE_EVENT_OP_NEW;
+ ev[i].queue_id = p->queue_id;
+ ev[i].sched_type = t->opt->sched_type_list[0];
+ ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev[i].event_type = RTE_EVENT_TYPE_CPU;
+ ev[i].sub_event_type = 0; /* stage 0 */
+ }
+
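+ /* The whole burst shares one timestamp; on a partial enqueue the
+ * not-yet-enqueued tail is re-stamped so stalled events do not
+ * inflate the latency figures.
+ */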
+ while (count < nb_pkts && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
+ continue;
+ timestamp = rte_get_timer_cycles();
+ for (i = 0; i < burst_size; i++) {
+ ev[i].flow_id = flow_counter++ % nb_flows;
+ ev[i].event_ptr = m[i];
+ m[i]->timestamp = timestamp;
+ }
+ enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
+ while (enq < burst_size) {
+ enq += rte_event_enqueue_burst(dev_id, port,
+ ev + enq,
+ burst_size - enq);
+ if (t->done)
+ break;
+ rte_pause();
+ timestamp = rte_get_timer_cycles();
+ for (i = enq; i < burst_size; i++)
+ m[i]->timestamp = timestamp;
+ }
+ count += burst_size;
+ }
+ return 0;
+}
+
+static inline int
+perf_event_timer_producer(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ uint64_t arm_latency = 0;
+ const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_timers = opt->nb_timers;
+ struct rte_mempool *pool = t->pool;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
+ uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+ memset(&tim, 0, sizeof(struct rte_event_timer));
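+ /* Rescale the expiry from the requested tick to the tick the
+ * adapter actually achieved (optm_timer_tick_nsec). For example, a
+ * 100us expiry with a 10us requested tick (10 ticks) becomes
+ * ceil(100/25) = 4 ticks when the adapter grants a 25us tick. Arm
+ * for at least one tick.
+ */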
+ timeout_ticks =
+ opt->optm_timer_tick_nsec
+ ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
+ opt->optm_timer_tick_nsec)
+ : timeout_ticks;
+ timeout_ticks += timeout_ticks ? 0 : 1;
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = t->opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+ while (count < nb_timers && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
+ rte_prefetch0(m[i + 1]);
+ m[i]->tim = tim;
+ m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m[i]->tim.ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ while (rte_event_timer_arm_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)&m[i], 1) != 1) {
+ if (t->done)
+ break;
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
+ }
+ count += BURST_SIZE;
+ }
+ fflush(stdout);
+ rte_delay_ms(1000);
+ printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+ __func__, rte_lcore_id(),
+ count ? (float)arm_latency / count /
+ (rte_get_timer_hz() / 1000000) : 0);
+ return 0;
+}
+
+static inline int
+perf_event_timer_producer_burst(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ uint64_t arm_latency = 0;
+ const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_timers = opt->nb_timers;
+ struct rte_mempool *pool = t->pool;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
+ uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+ memset(&tim, 0, sizeof(struct rte_event_timer));
+ timeout_ticks =
+ opt->optm_timer_tick_nsec
+ ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
+ opt->optm_timer_tick_nsec)
+ : timeout_ticks;
+ timeout_ticks += timeout_ticks ? 0 : 1;
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = t->opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+ while (count < nb_timers && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
+ rte_prefetch0(m[i + 1]);
+ m[i]->tim = tim;
+ m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m[i]->tim.ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
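+ /* All timers in the burst share one timeout, so a single
+ * tmo_tick call arms them; arm latency is sampled against the
+ * last element's timestamp.
+ */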
+ rte_event_timer_arm_tmo_tick_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)m,
+ tim.timeout_ticks,
+ BURST_SIZE);
+ arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
+ count += BURST_SIZE;
+ }
+ fflush(stdout);
+ rte_delay_ms(1000);
+ printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+ __func__, rte_lcore_id(),
+ count ? (float)arm_latency / count /
+ (rte_get_timer_hz() / 1000000) : 0);
+ return 0;
+}
+
+static inline void
+crypto_adapter_enq_op_new(struct prod_data *p)
+{
+ struct rte_cryptodev_sym_session **crypto_sess = p->ca.crypto_sess;
+ struct test_perf *t = p->t;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_pkts = t->nb_pkts;
+ struct rte_mempool *pool = t->pool;
+ struct rte_crypto_sym_op *sym_op;
+ struct evt_options *opt = t->opt;
+ uint16_t qp_id = p->ca.cdev_qp_id;
+ uint8_t cdev_id = p->ca.cdev_id;
+ uint32_t flow_counter = 0;
+ struct rte_crypto_op *op;
+ struct rte_mbuf *m;
+ uint64_t count = 0;
+ uint16_t len;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
+ __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
+ p->ca.cdev_qp_id);
+
+ len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
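+ /* The sessions attached below use the NULL cipher set up in
+ * cryptodev_sym_sess_create(), so the loop exercises the enqueue
+ * path rather than real cipher work.
+ */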
+ while (count < nb_pkts && t->done == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ rte_pktmbuf_append(m, len);
+ op = rte_crypto_op_alloc(t->ca_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (op == NULL) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+ sym_op = op->sym;
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = len;
+ rte_crypto_op_attach_sym_session(
+ op, crypto_sess[flow_counter++ % nb_flows]);
+
+ while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
+ t->done == false)
+ rte_pause();
+
+ count++;
+ }
+}
+
+static inline void
+crypto_adapter_enq_op_fwd(struct prod_data *p)
+{
+ struct rte_cryptodev_sym_session **crypto_sess = p->ca.crypto_sess;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct test_perf *t = p->t;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_pkts = t->nb_pkts;
+ struct rte_mempool *pool = t->pool;
+ struct evt_options *opt = t->opt;
+ struct rte_crypto_sym_op *sym_op;
+ uint32_t flow_counter = 0;
+ struct rte_crypto_op *op;
+ struct rte_event ev;
+ struct rte_mbuf *m;
+ uint64_t count = 0;
+ uint16_t len;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
+ __func__, rte_lcore_id(), port, p->queue_id,
+ p->ca.cdev_id, p->ca.cdev_qp_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
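+ /* OP_FORWARD mode: wrap each crypto op in an event and enqueue it
+ * to the adapter through the event port, instead of enqueuing
+ * directly to the cryptodev as OP_NEW mode does.
+ */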
+ while (count < nb_pkts && t->done == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ rte_pktmbuf_append(m, len);
+ op = rte_crypto_op_alloc(t->ca_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (op == NULL) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+ sym_op = op->sym;
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = len;
+ rte_crypto_op_attach_sym_session(
+ op, crypto_sess[flow_counter++ % nb_flows]);
+ ev.event_ptr = op;
+
+ while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
+ t->done == false)
+ rte_pause();
+
+ count++;
+ }
+}
+
+static inline int
+perf_event_crypto_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct evt_options *opt = p->t->opt;
+
+ if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ crypto_adapter_enq_op_new(p);
+ else
+ crypto_adapter_enq_op_fwd(p);
+
+ return 0;
+}
+
+static int
+perf_producer_wrapper(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ bool burst = evt_has_burst_mode(p->dev_id);
+
+ /* In case of synthetic producer, launch perf_producer or
+ * perf_producer_burst depending on producer enqueue burst size
+ */
+ if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
+ t->opt->prod_enq_burst_sz == 1)
+ return perf_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
+ t->opt->prod_enq_burst_sz > 1) {
+ if (!burst)
+ evt_err("This event device does not support burst mode");
+ else
+ return perf_producer_burst(arg);
+ } else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+ !t->opt->timdev_use_burst)
+ return perf_event_timer_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+ t->opt->timdev_use_burst)
+ return perf_event_timer_producer_burst(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return perf_event_crypto_producer(arg);
+ return 0;
+}
+
+static inline uint64_t
+processed_pkts(struct test_perf *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
+
+ return total;
+}
+
+static inline uint64_t
+total_latency(struct test_perf *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].latency;
+
+ return total;
+}
+
+int
+perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_perf *t = evt_test_priv(test);
+
+ int port_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker,
+ &t->worker[port_idx], lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
+ /* launch producers */
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
+ if (!(opt->plcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(perf_producer_wrapper,
+ &t->prod[port_idx], lcore_id);
+ if (ret) {
+ evt_err("failed to launch perf_producer %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
+ const uint64_t total_pkts = t->outstand_pkts;
+
+ uint64_t dead_lock_cycles = rte_get_timer_cycles();
+ int64_t dead_lock_remaining = total_pkts;
+ const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;
+
+ uint64_t perf_cycles = rte_get_timer_cycles();
+ int64_t perf_remaining = total_pkts;
+ const uint64_t perf_sample = rte_get_timer_hz();
+
+ static float total_mpps;
+ static uint64_t samples;
+
+ const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
+ int64_t remaining = t->outstand_pkts - processed_pkts(t);
+
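+ /* Measurement loop: once per second report the delta and average
+ * throughput; every five seconds, if no forward progress was made,
+ * dump the eventdev state and declare a deadlock.
+ */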
+ while (t->done == false) {
+ const uint64_t new_cycles = rte_get_timer_cycles();
+
+ if ((new_cycles - perf_cycles) > perf_sample) {
+ const uint64_t latency = total_latency(t);
+ const uint64_t pkts = processed_pkts(t);
+
+ remaining = t->outstand_pkts - pkts;
+ float mpps = (float)(perf_remaining-remaining)/1000000;
+
+ perf_remaining = remaining;
+ perf_cycles = new_cycles;
+ total_mpps += mpps;
+ ++samples;
+ if (opt->fwd_latency && pkts > 0) {
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
+ mpps, total_mpps/samples,
+ (float)latency / pkts / freq_mhz);
+ } else {
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
+ mpps, total_mpps/samples);
+ }
+ fflush(stdout);
+
+ if (remaining <= 0) {
+ t->result = EVT_TEST_SUCCESS;
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
+ t->done = true;
+ break;
+ }
+ }
+ }
+
+ if (new_cycles - dead_lock_cycles > dead_lock_sample &&
+ (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
+ remaining = t->outstand_pkts - processed_pkts(t);
+ if (dead_lock_remaining == remaining) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+ evt_err("No schedules for seconds, deadlock");
+ t->done = true;
+ break;
+ }
+ dead_lock_remaining = remaining;
+ dead_lock_cycles = new_cycles;
+ }
+ }
+ printf("\n");
+ return 0;
+}
+
+static int
+perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
+ struct rte_event_port_conf prod_conf)
+{
+ int ret = 0;
+ uint16_t prod;
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+
+ memset(&queue_conf, 0,
+ sizeof(struct rte_event_eth_rx_adapter_queue_conf));
+ queue_conf.ev.sched_type = opt->sched_type_list[0];
+ RTE_ETH_FOREACH_DEV(prod) {
+ uint32_t cap;
+
+ ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
+ prod, &cap);
+ if (ret) {
+ evt_err("failed to get event rx adapter[%d]"
+ " capabilities",
+ opt->dev_id);
+ return ret;
+ }
+ queue_conf.ev.queue_id = prod * stride;
+ ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
+ &prod_conf);
+ if (ret) {
+ evt_err("failed to create rx adapter[%d]", prod);
+ return ret;
+ }
+ ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
+ &queue_conf);
+ if (ret) {
+ evt_err("failed to add rx queues to adapter[%d]", prod);
+ return ret;
+ }
+
+ if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id;
+
+ rte_event_eth_rx_adapter_service_id_get(prod,
+ &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for Rx adapter\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int
+perf_event_timer_adapter_setup(struct test_perf *t)
+{
+ int i;
+ int ret;
+ struct rte_event_timer_adapter_info adapter_info;
+ struct rte_event_timer_adapter *wl;
+ uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
+ uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
+
+ if (nb_producers == 1)
+ flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;
+
+ for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
+ struct rte_event_timer_adapter_conf config = {
+ .event_dev_id = t->opt->dev_id,
+ .timer_adapter_id = i,
+ .timer_tick_ns = t->opt->timer_tick_nsec,
+ .max_tmo_ns = t->opt->max_tmo_nsec,
+ .nb_timers = t->opt->pool_sz,
+ .flags = flags,
+ };
+
+ wl = rte_event_timer_adapter_create(&config);
+ if (wl == NULL) {
+ evt_err("failed to create event timer ring %d", i);
+ return rte_errno;
+ }
+
+ memset(&adapter_info, 0,
+ sizeof(struct rte_event_timer_adapter_info));
+ rte_event_timer_adapter_get_info(wl, &adapter_info);
+ t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;
+
+ if (!(adapter_info.caps &
+ RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id = -1U;
+
+ rte_event_timer_adapter_service_id_get(wl,
+ &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for timer adapter\n");
+ return ret;
+ }
+ rte_service_runstate_set(service_id, 1);
+ }
+ t->timer_adptr[i] = wl;
+ }
+ return 0;
+}
+
+static int
+perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
+{
+ struct evt_options *opt = t->opt;
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
+ if (ret) {
+ evt_err("Failed to get crypto adapter capabilities");
+ return ret;
+ }
+
+ if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
+ ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
+ evt_err("crypto adapter %s mode unsupported\n",
+ opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
+ return -ENOTSUP;
+ } else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
+ evt_err("Storing crypto session not supported");
+ return -ENOTSUP;
+ }
+
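+ /* When the PMD can bind a response event at queue pair add time,
+ * pass it here; otherwise the adapter reads the response info from
+ * the session user data set in perf_event_dev_port_setup().
+ */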
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+ struct rte_event response_info;
+
+ response_info.event = 0;
+ response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ response_info.queue_id = p->queue_id;
+ ret = rte_event_crypto_adapter_queue_pair_add(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
+ &response_info);
+ } else {
+ ret = rte_event_crypto_adapter_queue_pair_add(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
+ }
+
+ return ret;
+}
+
+static struct rte_cryptodev_sym_session *
+cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_cryptodev_sym_session *sess;
+
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ cipher_xform.next = NULL;
+
+ sess = rte_cryptodev_sym_session_create(t->ca_sess_pool);
+ if (sess == NULL) {
+ evt_err("Failed to create sym session");
+ return NULL;
+ }
+
+ if (rte_cryptodev_sym_session_init(p->ca.cdev_id, sess, &cipher_xform,
+ t->ca_sess_priv_pool)) {
+ evt_err("Failed to init session");
+ return NULL;
+ }
+
+ return sess;
+}
+
+int
+perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t stride, uint8_t nb_queues,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct test_perf *t = evt_test_priv(test);
+ uint16_t port, prod;
+ int ret = -1;
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
+ port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+ w->processed_pkts = 0;
+ w->latency = 0;
+
+ struct rte_event_port_conf conf = *port_conf;
+ conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+ if (ret != nb_queues) {
+ evt_err("failed to link all queues to port %d", port);
+ return -EINVAL;
+ }
+ }
+
+ /* port for producers, no links */
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+ p->t = t;
+ }
+
+ struct rte_event_port_conf conf = *port_conf;
+ conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;
+
+ ret = perf_event_rx_adapter_setup(opt, stride, conf);
+ if (ret)
+ return ret;
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ prod = 0;
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+ p->queue_id = prod * stride;
+ p->t = t;
+ prod++;
+ }
+
+ ret = perf_event_timer_adapter_setup(t);
+ if (ret)
+ return ret;
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
+ struct rte_event_port_conf conf = *port_conf;
+ uint8_t cdev_id = 0;
+ uint16_t qp_id = 0;
+
+ ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
+ opt->dev_id, &conf, 0);
+ if (ret) {
+ evt_err("Failed to create crypto adapter");
+ return ret;
+ }
+
+ prod = 0;
+ for (; port < perf_nb_event_ports(opt); port++) {
+ struct rte_cryptodev_sym_session *crypto_sess;
+ union rte_event_crypto_metadata m_data;
+ struct prod_data *p = &t->prod[port];
+ uint32_t flow_id;
+
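+ /* Map each producer port to the next (cdev, qp) pair, moving on
+ * to the next cryptodev once the current one's queue pairs are
+ * exhausted.
+ */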
+ if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
+ cdev_id++;
+ qp_id = 0;
+ }
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port;
+ p->queue_id = prod * stride;
+ p->ca.cdev_id = cdev_id;
+ p->ca.cdev_qp_id = qp_id;
+ p->ca.crypto_sess = rte_zmalloc_socket(
+ NULL, sizeof(crypto_sess) * t->nb_flows,
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ p->t = t;
+
+ m_data.request_info.cdev_id = p->ca.cdev_id;
+ m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
+ m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ m_data.response_info.queue_id = p->queue_id;
+
+ for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
+ crypto_sess = cryptodev_sym_sess_create(p, t);
+ if (crypto_sess == NULL)
+ return -ENOMEM;
+
+ m_data.response_info.flow_id = flow_id;
+ rte_cryptodev_sym_session_set_user_data(
+ crypto_sess, &m_data, sizeof(m_data));
+ p->ca.crypto_sess[flow_id] = crypto_sess;
+ }
+
+ conf.event_port_cfg |=
+ RTE_EVENT_PORT_CFG_HINT_PRODUCER |
+ RTE_EVENT_PORT_CFG_HINT_CONSUMER;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = perf_event_crypto_adapter_setup(t, p);
+ if (ret)
+ return ret;
+
+ qp_id++;
+ prod++;
+ }
+ } else {
+ prod = 0;
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port;
+ p->queue_id = prod * stride;
+ p->t = t;
+
+ struct rte_event_port_conf conf = *port_conf;
+ conf.event_port_cfg |=
+ RTE_EVENT_PORT_CFG_HINT_PRODUCER |
+ RTE_EVENT_PORT_CFG_HINT_CONSUMER;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+ prod++;
+ }
+ }
+
+ return ret;
+}
+
+int
+perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
+{
+ unsigned int lcores;
+
+ /* N producer + N worker + main when producer cores are used
+ * Else N worker + main when Rx adapter is used
+ */
+ lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;
+
+ if (rte_lcore_count() < lcores) {
+ evt_err("test need minimum %d lcores", lcores);
+ return -1;
+ }
+
+ /* Validate worker lcores */
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+ evt_err("worker lcores overlap with main lcore");
+ return -1;
+ }
+ if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
+ evt_err("worker lcores overlap with producer lcores");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->wlcores)) {
+ evt_err("one or more worker lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->wlcores)) {
+ evt_err("at least one worker lcore is required");
+ return -1;
+ }
+
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
+ /* Validate producer lcores */
+ if (evt_lcores_has_overlap(opt->plcores,
+ rte_get_main_lcore())) {
+ evt_err("producer lcores overlaps with main lcore");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->plcores)) {
+ evt_err("one or more producer lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->plcores)) {
+ evt_err("minimum one producer is required");
+ return -1;
+ }
+ }
+
+ if (evt_has_invalid_stage(opt))
+ return -1;
+
+ if (evt_has_invalid_sched_type(opt))
+ return -1;
+
+ if (nb_queues > EVT_MAX_QUEUES) {
+ evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
+ return -1;
+ }
+ if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
+ evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
+ return -1;
+ }
+
+ /* Fixups */
+ if ((opt->nb_stages == 1 &&
+ opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
+ opt->fwd_latency) {
+ evt_info("fwd_latency is valid when nb_stages > 1, disabling");
+ opt->fwd_latency = 0;
+ }
+
+ if (opt->fwd_latency && !opt->q_priority) {
+ evt_info("enabled queue priority for latency measurement");
+ opt->q_priority = 1;
+ }
+ if (opt->nb_pkts == 0)
+ opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);
+
+ return 0;
+}
+
+void
+perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
+{
+ evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
+ evt_dump_producer_lcores(opt);
+ evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ evt_dump_nb_stages(opt);
+ evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
+ evt_dump("nb_evdev_queues", "%d", nb_queues);
+ evt_dump_queue_priority(opt);
+ evt_dump_sched_type_list(opt);
+ evt_dump_producer_type(opt);
+ evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
+}
+
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
- RTE_SET_USED(test);
+ int i;
+ struct test_perf *t = evt_test_priv(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ for (i = 0; i < opt->nb_timer_adptrs; i++)
+ rte_event_timer_adapter_stop(t->timer_adptr[i]);
+ }
rte_event_dev_stop(opt->dev_id);
rte_event_dev_close(opt->dev_id);
}
memset(obj, 0, mp->elt_size);
}
+#define NB_RX_DESC 128
+#define NB_TX_DESC 512
+int
+perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ int ret;
+ struct test_perf *t = evt_test_priv(test);
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
+ .split_hdr_size = 0,
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_hf = RTE_ETH_RSS_IP,
+ },
+ },
+ };
+
+ if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
+ return 0;
+
+ if (!rte_eth_dev_count_avail()) {
+ evt_err("No ethernet ports found.");
+ return -ENODEV;
+ }
+
+ RTE_ETH_FOREACH_DEV(i) {
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ ret = rte_eth_dev_info_get(i, &dev_info);
+ if (ret != 0) {
+ evt_err("Error during getting device (port %u) info: %s\n",
+ i, strerror(-ret));
+ return ret;
+ }
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ evt_info("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ i,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
+ if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
+ evt_err("Failed to configure eth port [%d]", i);
+ return -EINVAL;
+ }
+
+ if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
+ rte_socket_id(), NULL, t->pool) < 0) {
+ evt_err("Failed to setup eth port [%d] rx_queue: %d.",
+ i, 0);
+ return -EINVAL;
+ }
+
+ if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
+ rte_socket_id(), NULL) < 0) {
+ evt_err("Failed to setup eth port [%d] tx_queue: %d.",
+ i, 0);
+ return -EINVAL;
+ }
+
+ ret = rte_eth_promiscuous_enable(i);
+ if (ret != 0) {
+ evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+ i, rte_strerror(-ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void
+perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ RTE_SET_USED(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(i) {
+ rte_event_eth_rx_adapter_stop(i);
+ rte_eth_dev_stop(i);
+ }
+ }
+}
+
+int
+perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
+ struct test_perf *t = evt_test_priv(test);
+ unsigned int max_session_size;
+ uint32_t nb_sessions;
+ int ret;
+
+ if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return 0;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ evt_err("No crypto devices available\n");
+ return -ENODEV;
+ }
+
+ t->ca_op_pool = rte_crypto_op_pool_create(
+ "crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC, opt->pool_sz,
+ 128, 0, rte_socket_id());
+ if (t->ca_op_pool == NULL) {
+ evt_err("Failed to create crypto op pool");
+ return -ENOMEM;
+ }
+
+ nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
+ t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
+ "ca_sess_pool", nb_sessions, 0, 0,
+ sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
+ if (t->ca_sess_pool == NULL) {
+ evt_err("Failed to create sym session pool");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ max_session_size = 0;
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ unsigned int session_size;
+
+ session_size =
+ rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (session_size > max_session_size)
+ max_session_size = session_size;
+ }
+
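+ /* Reserve room for the request/response metadata stored alongside
+ * each session for the crypto adapter.
+ */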
+ max_session_size += sizeof(union rte_event_crypto_metadata);
+ t->ca_sess_priv_pool = rte_mempool_create(
+ "ca_sess_priv_pool", nb_sessions, max_session_size, 0, 0, NULL,
+ NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (t->ca_sess_priv_pool == NULL) {
+ evt_err("failed to create sym session private pool");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Calculate number of needed queue pairs, based on the amount of
+ * available number of logical cores and crypto devices. For instance,
+ * if there are 4 cores and 2 crypto devices, 2 queue pairs will be set
+ * up per device.
+ */
+ nb_plcores = evt_nr_active_lcores(opt->plcores);
+ nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
+ nb_plcores / cdev_count;
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_qp_conf qp_conf;
+ struct rte_cryptodev_config conf;
+ struct rte_cryptodev_info info;
+ int qp_id;
+
+ rte_cryptodev_info_get(cdev_id, &info);
+ if (nb_qps > info.max_nb_queue_pairs) {
+ evt_err("Not enough queue pairs per cryptodev (%u)",
+ nb_qps);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ conf.nb_queue_pairs = nb_qps;
+ conf.socket_id = SOCKET_ID_ANY;
+ conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;
+
+ ret = rte_cryptodev_configure(cdev_id, &conf);
+ if (ret) {
+ evt_err("Failed to configure cryptodev (%u)", cdev_id);
+ goto err;
+ }
+
+ qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
+ qp_conf.mp_session = t->ca_sess_pool;
+ qp_conf.mp_session_private = t->ca_sess_priv_pool;
+
+ for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
+ ret = rte_cryptodev_queue_pair_setup(
+ cdev_id, qp_id, &qp_conf,
+ rte_cryptodev_socket_id(cdev_id));
+ if (ret) {
+ evt_err("Failed to setup queue pairs on cryptodev %u\n",
+ cdev_id);
+ goto err;
+ }
+ }
+ }
+
+ return 0;
+err:
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
+ rte_cryptodev_close(cdev_id);
+
+ rte_mempool_free(t->ca_op_pool);
+ rte_mempool_free(t->ca_sess_pool);
+ rte_mempool_free(t->ca_sess_priv_pool);
+
+ return ret;
+}
+
+void
+perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t cdev_id, cdev_count = rte_cryptodev_count();
+ struct test_perf *t = evt_test_priv(test);
+ uint16_t port;
+
+ if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return;
+
+ for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
+ struct rte_cryptodev_sym_session *sess;
+ struct prod_data *p = &t->prod[port];
+ uint32_t flow_id;
+
+ for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
+ sess = p->ca.crypto_sess[flow_id];
+ cdev_id = p->ca.cdev_id;
+ rte_cryptodev_sym_session_clear(cdev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
+ }
+
+ rte_event_crypto_adapter_queue_pair_del(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
+ }
+
+ rte_event_crypto_adapter_free(TEST_PERF_CA_ID);
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ rte_cryptodev_stop(cdev_id);
+ rte_cryptodev_close(cdev_id);
+ }
+
+ rte_mempool_free(t->ca_op_pool);
+ rte_mempool_free(t->ca_sess_pool);
+ rte_mempool_free(t->ca_sess_priv_pool);
+}
+
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
struct test_perf *t = evt_test_priv(test);
- t->pool = rte_mempool_create(test->name, /* mempool name */
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ t->pool = rte_mempool_create(test->name, /* mempool name */
opt->pool_sz, /* number of elements*/
sizeof(struct perf_elt), /* element size*/
512, /* cache size*/
0, NULL, NULL,
perf_elt_init, /* obj constructor */
NULL, opt->socket_id, 0); /* flags */
+ } else {
+ t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+ opt->pool_sz, /* number of elements*/
+ 512, /* cache size*/
+ 0, /* private data size */
+ RTE_MBUF_DEFAULT_BUF_SIZE, /* data room size */
+ opt->socket_id); /* socket id */
+ }
+
if (t->pool == NULL) {
evt_err("failed to create mempool");
return -ENOMEM;
struct test_perf *t = evt_test_priv(test);
- t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
+ if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ t->outstand_pkts = opt->nb_timers *
+ evt_nr_active_lcores(opt->plcores);
+ t->nb_pkts = opt->nb_timers;
+ } else {
+ t->outstand_pkts = opt->nb_pkts *
+ evt_nr_active_lcores(opt->plcores);
+ t->nb_pkts = opt->nb_pkts;
+ }
+
t->nb_workers = evt_nr_active_lcores(opt->wlcores);
t->done = false;
- t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
t->result = EVT_TEST_FAILED;
t->opt = opt;