#include <rte_common.h>
#include <rte_debug.h>
+#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>
#include <rte_service.h>
EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */
EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. */
EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
+ EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR, /* Producer type Crypto Adapter. */
EVT_PROD_TYPE_MAX,
};
uint64_t timer_tick_nsec;
uint64_t optm_timer_tick_nsec;
enum evt_prod_type prod_type;
+ enum rte_event_crypto_adapter_mode crypto_adptr_mode;
};
static inline bool
if (test->ops.ethdev_destroy)
test->ops.ethdev_destroy(test, &opt);
+ if (test->ops.cryptodev_destroy)
+ test->ops.cryptodev_destroy(test, &opt);
+
rte_eal_mp_wait_lcore();
if (test->ops.test_result)
}
}
+ /* Test specific cryptodev setup */
+ if (test->ops.cryptodev_setup) {
+ if (test->ops.cryptodev_setup(test, &opt)) {
+ evt_err("%s: cryptodev setup failed", opt.test_name);
+ goto ethdev_destroy;
+ }
+ }
+
/* Test specific eventdev setup */
if (test->ops.eventdev_setup) {
if (test->ops.eventdev_setup(test, &opt)) {
evt_err("%s: eventdev setup failed", opt.test_name);
- goto ethdev_destroy;
+ goto cryptodev_destroy;
}
}
if (test->ops.eventdev_destroy)
test->ops.eventdev_destroy(test, &opt);
+cryptodev_destroy:
+ if (test->ops.cryptodev_destroy)
+ test->ops.cryptodev_destroy(test, &opt);
+
ethdev_destroy:
if (test->ops.ethdev_destroy)
test->ops.ethdev_destroy(test, &opt);
return 0;
}
+static int
+evt_parse_crypto_prod_type(struct evt_options *opt,
+ const char *arg __rte_unused)
+{
+ opt->prod_type = EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR;
+ return 0;
+}
+
+static int
+evt_parse_crypto_adptr_mode(struct evt_options *opt, const char *arg)
+{
+ uint8_t mode;
+ int ret;
+
+	ret = parser_read_uint8(&mode, arg);
+	if (ret)
+		return ret;
+
+	opt->crypto_adptr_mode = mode ? RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD :
+					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW;
+	return 0;
+}
+
static int
evt_parse_test_name(struct evt_options *opt, const char *arg)
{
"\t--queue_priority : enable queue priority\n"
"\t--deq_tmo_nsec : global dequeue timeout\n"
"\t--prod_type_ethdev : use ethernet device as producer.\n"
+ "\t--prod_type_cryptodev : use crypto device as producer.\n"
"\t--prod_type_timerdev : use event timer device as producer.\n"
"\t expiry_nsec would be the timeout\n"
"\t in ns.\n"
"\t--timer_tick_nsec : timer tick interval in ns.\n"
"\t--max_tmo_nsec : max timeout interval in ns.\n"
"\t--expiry_nsec : event timer expiry ns.\n"
+ "\t--crypto_adptr_mode : 0 for OP_NEW mode (default) and\n"
+ "\t 1 for OP_FORWARD mode.\n"
"\t--mbuf_sz : packet mbuf size.\n"
"\t--max_pkt_sz : max packet size.\n"
"\t--prod_enq_burst_sz : producer enqueue burst size.\n"
{ EVT_QUEUE_PRIORITY, 0, 0, 0 },
{ EVT_DEQ_TMO_NSEC, 1, 0, 0 },
{ EVT_PROD_ETHDEV, 0, 0, 0 },
+ { EVT_PROD_CRYPTODEV, 0, 0, 0 },
{ EVT_PROD_TIMERDEV, 0, 0, 0 },
{ EVT_PROD_TIMERDEV_BURST, 0, 0, 0 },
+ { EVT_CRYPTO_ADPTR_MODE, 1, 0, 0 },
{ EVT_NB_TIMERS, 1, 0, 0 },
{ EVT_NB_TIMER_ADPTRS, 1, 0, 0 },
{ EVT_TIMER_TICK_NSEC, 1, 0, 0 },
{ EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
{ EVT_DEQ_TMO_NSEC, evt_parse_deq_tmo_nsec},
{ EVT_PROD_ETHDEV, evt_parse_eth_prod_type},
+ { EVT_PROD_CRYPTODEV, evt_parse_crypto_prod_type},
{ EVT_PROD_TIMERDEV, evt_parse_timer_prod_type},
{ EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst},
+ { EVT_CRYPTO_ADPTR_MODE, evt_parse_crypto_adptr_mode},
{ EVT_NB_TIMERS, evt_parse_nb_timers},
{ EVT_NB_TIMER_ADPTRS, evt_parse_nb_timer_adptrs},
{ EVT_TIMER_TICK_NSEC, evt_parse_timer_tick_nsec},
#include <stdbool.h>
#include <rte_common.h>
+#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#define EVT_QUEUE_PRIORITY ("queue_priority")
#define EVT_DEQ_TMO_NSEC ("deq_tmo_nsec")
#define EVT_PROD_ETHDEV ("prod_type_ethdev")
+#define EVT_PROD_CRYPTODEV ("prod_type_cryptodev")
#define EVT_PROD_TIMERDEV ("prod_type_timerdev")
#define EVT_PROD_TIMERDEV_BURST ("prod_type_timerdev_burst")
+#define EVT_CRYPTO_ADPTR_MODE ("crypto_adptr_mode")
#define EVT_NB_TIMERS ("nb_timers")
#define EVT_NB_TIMER_ADPTRS ("nb_timer_adptrs")
#define EVT_TIMER_TICK_NSEC ("timer_tick_nsec")
return "Ethdev Rx Adapter";
case EVT_PROD_TYPE_EVENT_TIMER_ADPTR:
return "Event timer adapter";
+ case EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR:
+ return "Event crypto adapter";
}
return "";
evt_dump("timer_tick_nsec", "%"PRIu64"",
opt->timer_tick_nsec);
break;
+ case EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR:
+ snprintf(name, EVT_PROD_MAX_NAME_LEN,
+ "Event crypto adapter producers");
+ evt_dump("crypto adapter mode", "%s",
+ opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
+ evt_dump("nb_cryptodev", "%u", rte_cryptodev_count());
+ break;
}
evt_dump("prod_type", "%s", name);
}
(struct evt_test *test, struct evt_options *opt);
typedef int (*evt_test_ethdev_setup_t)
(struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_cryptodev_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
typedef int (*evt_test_eventdev_setup_t)
(struct evt_test *test, struct evt_options *opt);
typedef int (*evt_test_launch_lcores_t)
(struct evt_test *test, struct evt_options *opt);
typedef void (*evt_test_ethdev_destroy_t)
(struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_cryptodev_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
typedef void (*evt_test_mempool_destroy_t)
(struct evt_test *test, struct evt_options *opt);
typedef void (*evt_test_destroy_t)
evt_test_mempool_setup_t mempool_setup;
evt_test_ethdev_setup_t ethdev_setup;
evt_test_eventdev_setup_t eventdev_setup;
+ evt_test_cryptodev_setup_t cryptodev_setup;
evt_test_launch_lcores_t launch_lcores;
evt_test_result_t test_result;
evt_test_eventdev_destroy_t eventdev_destroy;
evt_test_ethdev_destroy_t ethdev_destroy;
+ evt_test_cryptodev_destroy_t cryptodev_destroy;
evt_test_mempool_destroy_t mempool_destroy;
evt_test_destroy_t test_destroy;
};
continue;
}
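+		/* For crypto events, recover the mbuf and free the op. */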
+ if (prod_crypto_type &&
+ (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+ struct rte_crypto_op *op = ev.event_ptr;
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ if (op->sym->m_dst == NULL)
+ ev.event_ptr = op->sym->m_src;
+ else
+ ev.event_ptr = op->sym->m_dst;
+ rte_crypto_op_free(op);
+ } else {
+ rte_crypto_op_free(op);
+ continue;
+ }
+ }
+
if (enable_fwd_latency && !prod_timer_type)
/* first stage in pipeline, mark ts to compute fwd latency */
atq_mark_fwd_latency(&ev);
}
for (i = 0; i < nb_rx; i++) {
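+			/* For crypto events, recover the mbuf and free the op. */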
+ if (prod_crypto_type &&
+ (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+ struct rte_crypto_op *op = ev[i].event_ptr;
+
+ if (op->status ==
+ RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ if (op->sym->m_dst == NULL)
+ ev[i].event_ptr =
+ op->sym->m_src;
+ else
+ ev[i].event_ptr =
+ op->sym->m_dst;
+ rte_crypto_op_free(op);
+ } else {
+ rte_crypto_op_free(op);
+ continue;
+ }
+ }
+
if (enable_fwd_latency && !prod_timer_type) {
rte_prefetch0(ev[i+1].event_ptr);
/* first stage in pipeline.
return ret;
}
}
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
+ uint8_t cdev_id, cdev_count;
+
+ cdev_count = rte_cryptodev_count();
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ ret = rte_cryptodev_start(cdev_id);
+ if (ret) {
+ evt_err("Failed to start cryptodev %u",
+ cdev_id);
+ return ret;
+ }
+ }
}
return 0;
.opt_dump = perf_atq_opt_dump,
.test_setup = perf_test_setup,
.ethdev_setup = perf_ethdev_setup,
+ .cryptodev_setup = perf_cryptodev_setup,
.mempool_setup = perf_mempool_setup,
.eventdev_setup = perf_atq_eventdev_setup,
.launch_lcores = perf_atq_launch_lcores,
.eventdev_destroy = perf_eventdev_destroy,
.mempool_destroy = perf_mempool_destroy,
.ethdev_destroy = perf_ethdev_destroy,
+ .cryptodev_destroy = perf_cryptodev_destroy,
.test_result = perf_test_result,
.test_destroy = perf_test_destroy,
};
#include "test_perf_common.h"
+#define NB_CRYPTODEV_DESCRIPTORS 128
+
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
return 0;
}
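+/*
+ * OP_NEW mode: crypto ops are enqueued directly to the cryptodev queue pair;
+ * the crypto adapter delivers the completed ops to the event device.
+ */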
+static inline void
+crypto_adapter_enq_op_new(struct prod_data *p)
+{
+ struct rte_cryptodev_sym_session **crypto_sess = p->ca.crypto_sess;
+ struct test_perf *t = p->t;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_pkts = t->nb_pkts;
+ struct rte_mempool *pool = t->pool;
+ struct rte_crypto_sym_op *sym_op;
+ struct evt_options *opt = t->opt;
+ uint16_t qp_id = p->ca.cdev_qp_id;
+ uint8_t cdev_id = p->ca.cdev_id;
+ uint32_t flow_counter = 0;
+ struct rte_crypto_op *op;
+ struct rte_mbuf *m;
+ uint64_t count = 0;
+ uint16_t len;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
+ __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
+ p->ca.cdev_qp_id);
+
+ len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
+ while (count < nb_pkts && t->done == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ rte_pktmbuf_append(m, len);
+		op = rte_crypto_op_alloc(t->ca_op_pool,
+				RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+		if (op == NULL) {
+			rte_pktmbuf_free(m);
+			continue;
+		}
+ sym_op = op->sym;
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = len;
+ rte_crypto_op_attach_sym_session(
+ op, crypto_sess[flow_counter++ % nb_flows]);
+
+ while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
+ t->done == false)
+ rte_pause();
+
+ count++;
+ }
+}
+
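+/*
+ * OP_FORWARD mode: each crypto op is wrapped in an event and enqueued to the
+ * adapter through the event port; the adapter forwards it to the cryptodev.
+ */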
+static inline void
+crypto_adapter_enq_op_fwd(struct prod_data *p)
+{
+ struct rte_cryptodev_sym_session **crypto_sess = p->ca.crypto_sess;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct test_perf *t = p->t;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_pkts = t->nb_pkts;
+ struct rte_mempool *pool = t->pool;
+ struct evt_options *opt = t->opt;
+ struct rte_crypto_sym_op *sym_op;
+ uint32_t flow_counter = 0;
+ struct rte_crypto_op *op;
+ struct rte_event ev;
+ struct rte_mbuf *m;
+ uint64_t count = 0;
+ uint16_t len;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
+ __func__, rte_lcore_id(), port, p->queue_id,
+ p->ca.cdev_id, p->ca.cdev_qp_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
+ while (count < nb_pkts && t->done == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ rte_pktmbuf_append(m, len);
+		op = rte_crypto_op_alloc(t->ca_op_pool,
+				RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+		if (op == NULL) {
+			rte_pktmbuf_free(m);
+			continue;
+		}
+ sym_op = op->sym;
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = len;
+ rte_crypto_op_attach_sym_session(
+ op, crypto_sess[flow_counter++ % nb_flows]);
+ ev.event_ptr = op;
+
+ while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
+ t->done == false)
+ rte_pause();
+
+ count++;
+ }
+}
+
+static inline int
+perf_event_crypto_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct evt_options *opt = p->t->opt;
+
+ if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ crypto_adapter_enq_op_new(p);
+ else
+ crypto_adapter_enq_op_fwd(p);
+
+ return 0;
+}
+
static int
perf_producer_wrapper(void *arg)
{
else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
t->opt->timdev_use_burst)
return perf_event_timer_producer_burst(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return perf_event_crypto_producer(arg);
return 0;
}
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
- opt->prod_type ==
- EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
t->done = true;
break;
}
if (new_cycles - dead_lock_cycles > dead_lock_sample &&
(opt->prod_type == EVT_PROD_TYPE_SYNT ||
- opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
remaining = t->outstand_pkts - processed_pkts(t);
if (dead_lock_remaining == remaining) {
rte_event_dev_dump(opt->dev_id, stdout);
return 0;
}
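+/*
+ * Check the adapter capabilities for the selected mode and add the
+ * cryptodev queue pair to the adapter.
+ */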
+static int
+perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
+{
+ struct evt_options *opt = t->opt;
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
+ if (ret) {
+ evt_err("Failed to get crypto adapter capabilities");
+ return ret;
+ }
+
+ if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
+ ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
+ evt_err("crypto adapter %s mode unsupported\n",
+ opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
+ return -ENOTSUP;
+ } else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
+ evt_err("Storing crypto session not supported");
+ return -ENOTSUP;
+ }
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+ struct rte_event response_info;
+
+ response_info.event = 0;
+ response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ response_info.queue_id = p->queue_id;
+ ret = rte_event_crypto_adapter_queue_pair_add(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
+ &response_info);
+ } else {
+ ret = rte_event_crypto_adapter_queue_pair_add(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
+ }
+
+ return ret;
+}
+
+static struct rte_cryptodev_sym_session *
+cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_cryptodev_sym_session *sess;
+
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ cipher_xform.next = NULL;
+
+ sess = rte_cryptodev_sym_session_create(t->ca_sess_pool);
+ if (sess == NULL) {
+ evt_err("Failed to create sym session");
+ return NULL;
+ }
+
+ if (rte_cryptodev_sym_session_init(p->ca.cdev_id, sess, &cipher_xform,
+ t->ca_sess_priv_pool)) {
+ evt_err("Failed to init session");
+ return NULL;
+ }
+
+ return sess;
+}
+
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t stride, uint8_t nb_queues,
ret = perf_event_timer_adapter_setup(t);
if (ret)
return ret;
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
+ struct rte_event_port_conf conf = *port_conf;
+ uint8_t cdev_id = 0;
+ uint16_t qp_id = 0;
+
+ ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
+ opt->dev_id, &conf, 0);
+ if (ret) {
+ evt_err("Failed to create crypto adapter");
+ return ret;
+ }
+
+ prod = 0;
+ for (; port < perf_nb_event_ports(opt); port++) {
+ struct rte_cryptodev_sym_session *crypto_sess;
+ union rte_event_crypto_metadata m_data;
+ struct prod_data *p = &t->prod[port];
+ uint32_t flow_id;
+
+ if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
+ cdev_id++;
+ qp_id = 0;
+ }
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port;
+ p->queue_id = prod * stride;
+ p->ca.cdev_id = cdev_id;
+ p->ca.cdev_qp_id = qp_id;
+			p->ca.crypto_sess = rte_zmalloc_socket(
+				NULL, sizeof(crypto_sess) * t->nb_flows,
+				RTE_CACHE_LINE_SIZE, opt->socket_id);
+			if (p->ca.crypto_sess == NULL) {
+				evt_err("Failed to alloc crypto session array");
+				return -ENOMEM;
+			}
+ p->t = t;
+
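+			/* Adapter request/response metadata, stored per session. */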
+ m_data.request_info.cdev_id = p->ca.cdev_id;
+ m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
+ m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ m_data.response_info.queue_id = p->queue_id;
+
+ for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
+ crypto_sess = cryptodev_sym_sess_create(p, t);
+ if (crypto_sess == NULL)
+ return -ENOMEM;
+
+ m_data.response_info.flow_id = flow_id;
+ rte_cryptodev_sym_session_set_user_data(
+ crypto_sess, &m_data, sizeof(m_data));
+ p->ca.crypto_sess[flow_id] = crypto_sess;
+ }
+
+ conf.event_port_cfg |=
+ RTE_EVENT_PORT_CFG_HINT_PRODUCER |
+ RTE_EVENT_PORT_CFG_HINT_CONSUMER;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = perf_event_crypto_adapter_setup(t, p);
+ if (ret)
+ return ret;
+
+ qp_id++;
+ prod++;
+ }
} else {
prod = 0;
for ( ; port < perf_nb_event_ports(opt); port++) {
}
if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
- opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
/* Validate producer lcores */
if (evt_lcores_has_overlap(opt->plcores,
rte_get_main_lcore())) {
},
};
- if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
- opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
+ if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
return 0;
if (!rte_eth_dev_count_avail()) {
}
}
+int
+perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
+ struct test_perf *t = evt_test_priv(test);
+ unsigned int max_session_size;
+ uint32_t nb_sessions;
+ int ret;
+
+ if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return 0;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ evt_err("No crypto devices available\n");
+ return -ENODEV;
+ }
+
+ t->ca_op_pool = rte_crypto_op_pool_create(
+ "crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC, opt->pool_sz,
+ 128, 0, rte_socket_id());
+ if (t->ca_op_pool == NULL) {
+ evt_err("Failed to create crypto op pool");
+ return -ENOMEM;
+ }
+
+ nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
+ t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
+ "ca_sess_pool", nb_sessions, 0, 0,
+ sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
+ if (t->ca_sess_pool == NULL) {
+ evt_err("Failed to create sym session pool");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ max_session_size = 0;
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ unsigned int session_size;
+
+ session_size =
+ rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (session_size > max_session_size)
+ max_session_size = session_size;
+ }
+
+ max_session_size += sizeof(union rte_event_crypto_metadata);
+ t->ca_sess_priv_pool = rte_mempool_create(
+ "ca_sess_priv_pool", nb_sessions, max_session_size, 0, 0, NULL,
+ NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (t->ca_sess_priv_pool == NULL) {
+ evt_err("failed to create sym session private pool");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Calculate number of needed queue pairs, based on the amount of
+ * available number of logical cores and crypto devices. For instance,
+ * if there are 4 cores and 2 crypto devices, 2 queue pairs will be set
+ * up per device.
+ */
+ nb_plcores = evt_nr_active_lcores(opt->plcores);
+ nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
+ nb_plcores / cdev_count;
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_qp_conf qp_conf;
+ struct rte_cryptodev_config conf;
+ struct rte_cryptodev_info info;
+ int qp_id;
+
+ rte_cryptodev_info_get(cdev_id, &info);
+ if (nb_qps > info.max_nb_queue_pairs) {
+ evt_err("Not enough queue pairs per cryptodev (%u)",
+ nb_qps);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ conf.nb_queue_pairs = nb_qps;
+ conf.socket_id = SOCKET_ID_ANY;
+ conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;
+
+ ret = rte_cryptodev_configure(cdev_id, &conf);
+ if (ret) {
+ evt_err("Failed to configure cryptodev (%u)", cdev_id);
+ goto err;
+ }
+
+ qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
+ qp_conf.mp_session = t->ca_sess_pool;
+ qp_conf.mp_session_private = t->ca_sess_priv_pool;
+
+ for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
+ ret = rte_cryptodev_queue_pair_setup(
+ cdev_id, qp_id, &qp_conf,
+ rte_cryptodev_socket_id(cdev_id));
+ if (ret) {
+ evt_err("Failed to setup queue pairs on cryptodev %u\n",
+ cdev_id);
+ goto err;
+ }
+ }
+ }
+
+ return 0;
+err:
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
+ rte_cryptodev_close(cdev_id);
+
+ rte_mempool_free(t->ca_op_pool);
+ rte_mempool_free(t->ca_sess_pool);
+ rte_mempool_free(t->ca_sess_priv_pool);
+
+ return ret;
+}
+
+void
+perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t cdev_id, cdev_count = rte_cryptodev_count();
+ struct test_perf *t = evt_test_priv(test);
+ uint16_t port;
+
+ if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return;
+
+ for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
+ struct rte_cryptodev_sym_session *sess;
+ struct prod_data *p = &t->prod[port];
+ uint32_t flow_id;
+ uint8_t cdev_id;
+
+ for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
+ sess = p->ca.crypto_sess[flow_id];
+ cdev_id = p->ca.cdev_id;
+ rte_cryptodev_sym_session_clear(cdev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
+ }
+
+ rte_event_crypto_adapter_queue_pair_del(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
+ }
+
+ rte_event_crypto_adapter_free(TEST_PERF_CA_ID);
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ rte_cryptodev_stop(cdev_id);
+ rte_cryptodev_close(cdev_id);
+ }
+
+ rte_mempool_free(t->ca_op_pool);
+ rte_mempool_free(t->ca_sess_pool);
+ rte_mempool_free(t->ca_sess_priv_pool);
+}
+
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
#include <stdbool.h>
#include <unistd.h>
+#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_timer_adapter.h>
#include <rte_lcore.h>
#include "evt_options.h"
#include "evt_test.h"
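+/* Crypto adapter instance id used by the perf tests. */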
+#define TEST_PERF_CA_ID 0
+
struct test_perf;
struct worker_data {
struct test_perf *t;
} __rte_cache_aligned;
+struct crypto_adptr_data {
+ uint8_t cdev_id;
+ uint16_t cdev_qp_id;
+ struct rte_cryptodev_sym_session **crypto_sess;
+};
struct prod_data {
uint8_t dev_id;
uint8_t port_id;
uint8_t queue_id;
+ struct crypto_adptr_data ca;
struct test_perf *t;
} __rte_cache_aligned;
-
struct test_perf {
/* Don't change the offset of "done". Signal handler use this memory
* to terminate all lcores work.
uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
struct rte_event_timer_adapter *timer_adptr[
RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
+ struct rte_mempool *ca_op_pool;
+ struct rte_mempool *ca_sess_pool;
+ struct rte_mempool *ca_sess_priv_pool;
} __rte_cache_aligned;
struct perf_elt {
const uint8_t port = w->port_id;\
const uint8_t prod_timer_type = \
opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
+ const uint8_t prod_crypto_type = \
+ opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR;\
uint8_t *const sched_type_list = &t->sched_type_list[0];\
struct rte_mempool *const pool = t->pool;\
const uint8_t nb_stages = t->opt->nb_stages;\
int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
int perf_test_setup(struct evt_test *test, struct evt_options *opt);
int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
+int perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt);
int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t stride, uint8_t nb_queues,
void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
rte_pause();
continue;
}
+
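+		/* For crypto events, recover the mbuf and free the op. */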
+ if (prod_crypto_type &&
+ (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+ struct rte_crypto_op *op = ev.event_ptr;
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ if (op->sym->m_dst == NULL)
+ ev.event_ptr = op->sym->m_src;
+ else
+ ev.event_ptr = op->sym->m_dst;
+ rte_crypto_op_free(op);
+ } else {
+ rte_crypto_op_free(op);
+ continue;
+ }
+ }
+
if (enable_fwd_latency && !prod_timer_type)
/* first q in pipeline, mark timestamp to compute fwd latency */
mark_fwd_latency(&ev, nb_stages);
}
for (i = 0; i < nb_rx; i++) {
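+			/* For crypto events, recover the mbuf and free the op. */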
+ if (prod_crypto_type &&
+ (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+ struct rte_crypto_op *op = ev[i].event_ptr;
+
+ if (op->status ==
+ RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ if (op->sym->m_dst == NULL)
+ ev[i].event_ptr =
+ op->sym->m_src;
+ else
+ ev[i].event_ptr =
+ op->sym->m_dst;
+ rte_crypto_op_free(op);
+ } else {
+ rte_crypto_op_free(op);
+ continue;
+ }
+ }
+
if (enable_fwd_latency && !prod_timer_type) {
rte_prefetch0(ev[i+1].event_ptr);
/* first queue in pipeline.
return ret;
}
}
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
+ uint8_t cdev_id, cdev_count;
+
+ cdev_count = rte_cryptodev_count();
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ ret = rte_cryptodev_start(cdev_id);
+ if (ret) {
+ evt_err("Failed to start cryptodev %u",
+ cdev_id);
+ return ret;
+ }
+ }
}
return 0;
.test_setup = perf_test_setup,
.mempool_setup = perf_mempool_setup,
.ethdev_setup = perf_ethdev_setup,
+ .cryptodev_setup = perf_cryptodev_setup,
.eventdev_setup = perf_queue_eventdev_setup,
.launch_lcores = perf_queue_launch_lcores,
.eventdev_destroy = perf_eventdev_destroy,
.mempool_destroy = perf_mempool_destroy,
.ethdev_destroy = perf_ethdev_destroy,
+ .cryptodev_destroy = perf_cryptodev_destroy,
.test_result = perf_test_result,
.test_destroy = perf_test_destroy,
};
* Called ``rte_ipv4/6_udptcp_cksum_mbuf()`` functions in testpmd csum mode
to support software UDP/TCP checksum over multiple segments.
+* **Added crypto producer mode in test-eventdev.**
+
+  * Crypto producer mode helps to measure the performance of the OP_NEW and
+    OP_FORWARD modes of the event crypto adapter.
+
Removed Items
-------------
Use burst mode event timer adapter as producer.
+* ``--prod_type_cryptodev``
+
+ Use crypto device as producer.
+
* ``--timer_tick_nsec``
Used to dictate number of nano seconds between bucket traversal of the
timeout is out of the supported range of event device it will be
adjusted to the highest/lowest supported dequeue timeout supported.
+* ``--crypto_adptr_mode``
+
+ Set crypto adapter mode. Use 0 for OP_NEW (default) and 1 for
+ OP_FORWARD mode.
+
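+  For example, a minimal invocation using the null crypto PMD could look as
+  follows (the event device, core lists and other parameters are only
+  illustrative and depend on the platform):
+
+  .. code-block:: console
+
+     sudo <build_dir>/app/dpdk-test-eventdev -l 0-3 --vdev=crypto_null -- \
+             --test=perf_queue --plcores=1 --wlcores=2 --stlist=a \
+             --prod_type_cryptodev --crypto_adptr_mode=1
+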
* ``--mbuf_sz``
Set packet mbuf size. Can be used to configure Jumbo Frames. Only
--prod_type_ethdev
--prod_type_timerdev_burst
--prod_type_timerdev
+ --prod_type_cryptodev
--prod_enq_burst_sz
--timer_tick_nsec
--max_tmo_nsec
--nb_timers
--nb_timer_adptrs
--deq_tmo_nsec
+ --crypto_adptr_mode
Example
^^^^^^^
--prod_type_ethdev
--prod_type_timerdev_burst
--prod_type_timerdev
+ --prod_type_cryptodev
--timer_tick_nsec
--max_tmo_nsec
--expiry_nsec
--nb_timers
--nb_timer_adptrs
--deq_tmo_nsec
+ --crypto_adptr_mode
Example
^^^^^^^