#include "test_perf_common.h"
+#define NB_CRYPTODEV_DESCRIPTORS 128
+#define DATA_SIZE 512
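+
+/* 1024-bit modular exponentiation test vector used to create asymmetric
+ * crypto sessions; remainder holds the expected result of
+ * base ^ exponent mod modulus.
+ */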
+struct modex_test_data {
+ enum rte_crypto_asym_xform_type xform_type;
+ struct {
+ uint8_t data[DATA_SIZE];
+ uint16_t len;
+ } base;
+ struct {
+ uint8_t data[DATA_SIZE];
+ uint16_t len;
+ } exponent;
+ struct {
+ uint8_t data[DATA_SIZE];
+ uint16_t len;
+ } modulus;
+ struct {
+ uint8_t data[DATA_SIZE];
+ uint16_t len;
+ } remainder;
+ uint16_t result_len;
+};
+
+static struct modex_test_data modex_test_case = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .base = {
+ .data = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
+ 0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
+ 0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
+ },
+ .len = 20,
+ },
+ .exponent = {
+ .data = {
+ 0x01, 0x00, 0x01
+ },
+ .len = 3,
+ },
+ .remainder = {
+ .data = {
+ 0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
+ 0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
+ 0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
+ 0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
+ 0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
+ 0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
+ 0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
+ 0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
+ 0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
+ 0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
+ 0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
+ 0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
+ 0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
+ 0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
+ 0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
+ 0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
+ },
+ .len = 128,
+ },
+ .modulus = {
+ .data = {
+ 0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00, 0x0a,
+ 0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5, 0xce,
+ 0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a, 0xa2,
+ 0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde, 0x0a,
+ 0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a, 0x3d,
+ 0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63, 0x6a,
+ 0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27, 0x6e,
+ 0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa, 0x72,
+ 0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53, 0x87,
+ 0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a, 0x62,
+ 0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63, 0x18,
+ 0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33, 0x4e,
+ 0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3, 0x03,
+ 0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e, 0xee,
+ 0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb, 0xa6,
+ 0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde, 0x55
+ },
+ .len = 128,
+ },
+ .result_len = 128,
+};
+
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
return 0;
}
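+
+/* Synthetic producer enqueueing events in bursts of opt->prod_enq_burst_sz
+ * (clamped to the port's maximum enqueue depth). Each mempool element is
+ * stamped with the enqueue-time TSC so workers can compute latency.
+ */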
+static inline int
+perf_producer_burst(void *arg)
+{
+ uint32_t i;
+ uint64_t timestamp;
+ struct rte_event_dev_info dev_info;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ const uint32_t nb_flows = t->nb_flows;
+ uint32_t flow_counter = 0;
+ uint16_t enq = 0;
+ uint64_t count = 0;
+ struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
+ struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
+ uint32_t burst_size = opt->prod_enq_burst_sz;
+
+ memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
+ rte_event_dev_info_get(dev_id, &dev_info);
+ if (dev_info.max_event_port_enqueue_depth < burst_size)
+ burst_size = dev_info.max_event_port_enqueue_depth;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
+ rte_lcore_id(), dev_id, port, p->queue_id);
+
+ for (i = 0; i < burst_size; i++) {
+ ev[i].op = RTE_EVENT_OP_NEW;
+ ev[i].queue_id = p->queue_id;
+ ev[i].sched_type = t->opt->sched_type_list[0];
+ ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev[i].event_type = RTE_EVENT_TYPE_CPU;
+ ev[i].sub_event_type = 0; /* stage 0 */
+ }
+
+ while (count < nb_pkts && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
+ continue;
+ timestamp = rte_get_timer_cycles();
+ for (i = 0; i < burst_size; i++) {
+ ev[i].flow_id = flow_counter++ % nb_flows;
+ ev[i].event_ptr = m[i];
+ m[i]->timestamp = timestamp;
+ }
+ enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
+ while (enq < burst_size) {
+ enq += rte_event_enqueue_burst(dev_id, port,
+ ev + enq,
+ burst_size - enq);
+ if (t->done)
+ break;
+ rte_pause();
+ timestamp = rte_get_timer_cycles();
+ for (i = enq; i < burst_size; i++)
+ m[i]->timestamp = timestamp;
+ }
+ count += burst_size;
+ }
+ return 0;
+}
+
static inline int
perf_event_timer_producer(void *arg)
{
return 0;
}
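+
+/* OP_NEW mode: crypto ops are enqueued directly to the cryptodev queue
+ * pair; the crypto adapter dequeues the completed ops and injects them
+ * into the event device as NEW events.
+ */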
+static inline void
+crypto_adapter_enq_op_new(struct prod_data *p)
+{
+ struct test_perf *t = p->t;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_pkts = t->nb_pkts;
+ struct rte_mempool *pool = t->pool;
+ struct evt_options *opt = t->opt;
+ uint16_t qp_id = p->ca.cdev_qp_id;
+ uint8_t cdev_id = p->ca.cdev_id;
+ uint32_t flow_counter = 0;
+ struct rte_crypto_op *op;
+ struct rte_mbuf *m;
+ uint64_t count = 0;
+ uint16_t len;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
+ __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
+ p->ca.cdev_qp_id);
+
+ len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
+ while (count < nb_pkts && t->done == false) {
+ if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ struct rte_crypto_sym_op *sym_op;
+
+ op = rte_crypto_op_alloc(t->ca_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (op == NULL)
+ continue;
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL) {
+ /* Avoid leaking the op when mbuf allocation fails. */
+ rte_crypto_op_free(op);
+ continue;
+ }
+
+ rte_pktmbuf_append(m, len);
+ sym_op = op->sym;
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = len;
+ rte_crypto_op_attach_sym_session(
+ op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
+ } else {
+ struct rte_crypto_asym_op *asym_op;
+ uint8_t *result;
+
+ op = rte_crypto_op_alloc(t->ca_op_pool,
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (op == NULL)
+ continue;
+ result = rte_zmalloc(NULL,
+ modex_test_case.result_len, 0);
+ if (result == NULL) {
+ rte_crypto_op_free(op);
+ continue;
+ }
+ asym_op = op->asym;
+ asym_op->modex.base.data = modex_test_case.base.data;
+ asym_op->modex.base.length = modex_test_case.base.len;
+ asym_op->modex.result.data = result;
+ asym_op->modex.result.length = modex_test_case.result_len;
+ rte_crypto_op_attach_asym_session(
+ op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
+ }
+ while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
+ t->done == false)
+ rte_pause();
+
+ count++;
+ }
+}
+
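+/* OP_FORWARD mode: crypto ops are wrapped in events and submitted via
+ * rte_event_crypto_adapter_enqueue(); the adapter forwards them to the
+ * cryptodev and injects the completions back as events.
+ */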
+static inline void
+crypto_adapter_enq_op_fwd(struct prod_data *p)
+{
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct test_perf *t = p->t;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_pkts = t->nb_pkts;
+ struct rte_mempool *pool = t->pool;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ struct rte_crypto_op *op;
+ struct rte_event ev;
+ struct rte_mbuf *m;
+ uint64_t count = 0;
+ uint16_t len;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
+ __func__, rte_lcore_id(), port, p->queue_id,
+ p->ca.cdev_id, p->ca.cdev_qp_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
+ while (count < nb_pkts && t->done == false) {
+ if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ struct rte_crypto_sym_op *sym_op;
+
+ op = rte_crypto_op_alloc(t->ca_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (op == NULL)
+ continue;
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL) {
+ /* Avoid leaking the op when mbuf allocation fails. */
+ rte_crypto_op_free(op);
+ continue;
+ }
+
+ rte_pktmbuf_append(m, len);
+ sym_op = op->sym;
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = len;
+ rte_crypto_op_attach_sym_session(
+ op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
+ } else {
+ struct rte_crypto_asym_op *asym_op;
+ uint8_t *result;
+
+ op = rte_crypto_op_alloc(t->ca_op_pool,
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (op == NULL)
+ continue;
+ result = rte_zmalloc(NULL,
+ modex_test_case.result_len, 0);
+ if (result == NULL) {
+ rte_crypto_op_free(op);
+ continue;
+ }
+ asym_op = op->asym;
+ asym_op->modex.base.data = modex_test_case.base.data;
+ asym_op->modex.base.length = modex_test_case.base.len;
+ asym_op->modex.result.data = result;
+ asym_op->modex.result.length = modex_test_case.result_len;
+ rte_crypto_op_attach_asym_session(
+ op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
+ }
+ ev.event_ptr = op;
+
+ while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
+ t->done == false)
+ rte_pause();
+
+ count++;
+ }
+}
+
+static inline int
+perf_event_crypto_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct evt_options *opt = p->t->opt;
+
+ if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ crypto_adapter_enq_op_new(p);
+ else
+ crypto_adapter_enq_op_fwd(p);
+
+ return 0;
+}
+
static int
perf_producer_wrapper(void *arg)
{
struct prod_data *p = arg;
struct test_perf *t = p->t;
- /* Launch the producer function only in case of synthetic producer. */
- if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
+ bool burst = evt_has_burst_mode(p->dev_id);
+
+ /* For the synthetic producer, launch perf_producer or
+ * perf_producer_burst depending on the producer enqueue burst size.
+ */
+ if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
+ t->opt->prod_enq_burst_sz == 1)
return perf_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
+ t->opt->prod_enq_burst_sz > 1) {
+ if (!burst)
+ evt_err("This event device does not support burst mode");
+ else
+ return perf_producer_burst(arg);
+ }
else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
!t->opt->timdev_use_burst)
return perf_event_timer_producer(arg);
else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
t->opt->timdev_use_burst)
return perf_event_timer_producer_burst(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return perf_event_crypto_producer(arg);
return 0;
}
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
- opt->prod_type ==
- EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
t->done = true;
break;
}
if (new_cycles - dead_lock_cycles > dead_lock_sample &&
(opt->prod_type == EVT_PROD_TYPE_SYNT ||
- opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
remaining = t->outstand_pkts - processed_pkts(t);
if (dead_lock_remaining == remaining) {
rte_event_dev_dump(opt->dev_id, stdout);
return 0;
}
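+
+/* Check the adapter capabilities against the requested mode and bind the
+ * cryptodev queue pair to the adapter; response_info is passed only when
+ * the PMD supports binding an event per queue pair.
+ */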
+static int
+perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
+{
+ struct evt_options *opt = t->opt;
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
+ if (ret) {
+ evt_err("Failed to get crypto adapter capabilities");
+ return ret;
+ }
+
+ if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
+ ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
+ evt_err("crypto adapter %s mode unsupported\n",
+ opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
+ return -ENOTSUP;
+ } else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
+ evt_err("Storing crypto session not supported");
+ return -ENOTSUP;
+ }
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+ struct rte_event response_info;
+
+ response_info.event = 0;
+ response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ response_info.queue_id = p->queue_id;
+ ret = rte_event_crypto_adapter_queue_pair_add(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
+ &response_info);
+ } else {
+ ret = rte_event_crypto_adapter_queue_pair_add(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
+ }
+
+ return ret;
+}
+
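+/* Create a NULL-cipher symmetric session: the perf test only measures
+ * event/adapter overhead, so the payload is passed through unmodified.
+ */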
+static struct rte_cryptodev_sym_session *
+cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_cryptodev_sym_session *sess;
+
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ cipher_xform.next = NULL;
+
+ sess = rte_cryptodev_sym_session_create(t->ca_sess_pool);
+ if (sess == NULL) {
+ evt_err("Failed to create sym session");
+ return NULL;
+ }
+
+ if (rte_cryptodev_sym_session_init(p->ca.cdev_id, sess, &cipher_xform,
+ t->ca_sess_priv_pool)) {
+ evt_err("Failed to init session");
+ return NULL;
+ }
+
+ return sess;
+}
+
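+/* Create an asymmetric session for the modular exponentiation vector
+ * defined above, after probing that the device supports MODEX.
+ */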
+static void *
+cryptodev_asym_sess_create(struct prod_data *p, struct test_perf *t)
+{
+ const struct rte_cryptodev_asymmetric_xform_capability *capability;
+ struct rte_cryptodev_asym_capability_idx cap_idx;
+ struct rte_crypto_asym_xform xform;
+ void *sess;
+
+ xform.next = NULL;
+ xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
+ cap_idx.type = xform.xform_type;
+ capability = rte_cryptodev_asym_capability_get(p->ca.cdev_id, &cap_idx);
+ if (capability == NULL) {
+ evt_err("Device doesn't support MODEX. Test Skipped\n");
+ return NULL;
+ }
+
+ xform.modex.modulus.data = modex_test_case.modulus.data;
+ xform.modex.modulus.length = modex_test_case.modulus.len;
+ xform.modex.exponent.data = modex_test_case.exponent.data;
+ xform.modex.exponent.length = modex_test_case.exponent.len;
+
+ if (rte_cryptodev_asym_session_create(p->ca.cdev_id, &xform,
+ t->ca_asym_sess_pool, &sess)) {
+ evt_err("Failed to create asym session");
+ return NULL;
+ }
+
+ return sess;
+}
+
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t stride, uint8_t nb_queues,
w->processed_pkts = 0;
w->latency = 0;
- ret = rte_event_port_setup(opt->dev_id, port, port_conf);
+ struct rte_event_port_conf conf = *port_conf;
+ conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &conf);
if (ret) {
evt_err("failed to setup port %d", port);
return ret;
p->t = t;
}
- ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
+ struct rte_event_port_conf conf = *port_conf;
+ conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;
+
+ ret = perf_event_rx_adapter_setup(opt, stride, conf);
if (ret)
return ret;
} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
ret = perf_event_timer_adapter_setup(t);
if (ret)
return ret;
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
+ struct rte_event_port_conf conf = *port_conf;
+ uint8_t cdev_id = 0;
+ uint16_t qp_id = 0;
+
+ ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
+ opt->dev_id, &conf, 0);
+ if (ret) {
+ evt_err("Failed to create crypto adapter");
+ return ret;
+ }
+
+ prod = 0;
+ for (; port < perf_nb_event_ports(opt); port++) {
+ union rte_event_crypto_metadata m_data;
+ struct prod_data *p = &t->prod[port];
+ uint32_t flow_id;
+
+ if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
+ cdev_id++;
+ qp_id = 0;
+ }
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port;
+ p->queue_id = prod * stride;
+ p->ca.cdev_id = cdev_id;
+ p->ca.cdev_qp_id = qp_id;
+ p->ca.crypto_sess = rte_zmalloc_socket(
+ NULL, sizeof(void *) * t->nb_flows,
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ if (p->ca.crypto_sess == NULL) {
+ evt_err("Failed to allocate crypto session array");
+ return -ENOMEM;
+ }
+ p->t = t;
+
+ m_data.request_info.cdev_id = p->ca.cdev_id;
+ m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
+ m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ m_data.response_info.queue_id = p->queue_id;
+
+ for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
+ m_data.response_info.flow_id = flow_id;
+ if (opt->crypto_op_type ==
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ struct rte_cryptodev_sym_session *sess;
+
+ sess = cryptodev_sym_sess_create(p, t);
+ if (sess == NULL)
+ return -ENOMEM;
+
+ rte_cryptodev_session_event_mdata_set(
+ cdev_id,
+ sess,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ RTE_CRYPTO_OP_WITH_SESSION,
+ &m_data, sizeof(m_data));
+ p->ca.crypto_sess[flow_id] = sess;
+ } else {
+ void *sess;
+
+ sess = cryptodev_asym_sess_create(p, t);
+ if (sess == NULL)
+ return -ENOMEM;
+ rte_cryptodev_session_event_mdata_set(
+ cdev_id,
+ sess,
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ RTE_CRYPTO_OP_WITH_SESSION,
+ &m_data, sizeof(m_data));
+ p->ca.crypto_sess[flow_id] = sess;
+ }
+ }
+
+ conf.event_port_cfg |=
+ RTE_EVENT_PORT_CFG_HINT_PRODUCER |
+ RTE_EVENT_PORT_CFG_HINT_CONSUMER;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = perf_event_crypto_adapter_setup(t, p);
+ if (ret)
+ return ret;
+
+ qp_id++;
+ prod++;
+ }
} else {
prod = 0;
for ( ; port < perf_nb_event_ports(opt); port++) {
p->queue_id = prod * stride;
p->t = t;
- ret = rte_event_port_setup(opt->dev_id, port,
- port_conf);
+ struct rte_event_port_conf conf = *port_conf;
+ conf.event_port_cfg |=
+ RTE_EVENT_PORT_CFG_HINT_PRODUCER |
+ RTE_EVENT_PORT_CFG_HINT_CONSUMER;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &conf);
if (ret) {
evt_err("failed to setup port %d", port);
return ret;
}
if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
- opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
/* Validate producer lcores */
if (evt_lcores_has_overlap(opt->plcores,
rte_get_main_lcore())) {
evt_dump_queue_priority(opt);
evt_dump_sched_type_list(opt);
evt_dump_producer_type(opt);
+ evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
+}
+
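+/* Flush callback for rte_event_port_quiesce(): return any event still
+ * held by the port back to the mempool.
+ */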
+static void
+perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+ void *args)
+{
+ rte_mempool_put(args, ev.event_ptr);
+}
+
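+/* Release events that were dequeued but not re-enqueued: return their
+ * payloads to the mempool, mark all dequeued events RTE_EVENT_OP_RELEASE
+ * to drop the scheduling contexts, then quiesce the port.
+ */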
+void
+perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
+ uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq)
+{
+ int i;
+
+ if (nb_deq) {
+ for (i = nb_enq; i < nb_deq; i++)
+ rte_mempool_put(pool, events[i].event_ptr);
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
+ }
+ rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
}
void
struct test_perf *t = evt_test_priv(test);
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
};
- if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
- opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
+ if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
return 0;
if (!rte_eth_dev_count_avail()) {
return 0;
}
-void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+void
+perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
uint16_t i;
RTE_SET_USED(test);
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
RTE_ETH_FOREACH_DEV(i) {
rte_event_eth_rx_adapter_stop(i);
+ rte_event_eth_rx_adapter_queue_del(i, i, -1);
+ rte_eth_dev_rx_queue_stop(i, 0);
+ }
+ }
+}
+
+void
+perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ RTE_SET_USED(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(i) {
+ rte_event_eth_tx_adapter_stop(i);
+ rte_event_eth_tx_adapter_queue_del(i, i, -1);
+ rte_eth_dev_tx_queue_stop(i, 0);
rte_eth_dev_stop(i);
}
}
}
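+
+/* Create the crypto op and session pools, then configure each cryptodev
+ * with enough queue pairs so that every producer lcore gets a dedicated
+ * queue pair.
+ */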
+int
+perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
+ struct test_perf *t = evt_test_priv(test);
+ unsigned int max_session_size;
+ uint32_t nb_sessions;
+ int ret;
+
+ if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return 0;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ evt_err("No crypto devices available\n");
+ return -ENODEV;
+ }
+
+ t->ca_op_pool = rte_crypto_op_pool_create(
+ "crypto_op_pool", opt->crypto_op_type, opt->pool_sz,
+ 128, sizeof(union rte_event_crypto_metadata),
+ rte_socket_id());
+ if (t->ca_op_pool == NULL) {
+ evt_err("Failed to create crypto op pool");
+ return -ENOMEM;
+ }
+
+ nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
+ t->ca_asym_sess_pool = rte_cryptodev_asym_session_pool_create(
+ "ca_asym_sess_pool", nb_sessions, 0,
+ sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
+ if (t->ca_asym_sess_pool == NULL) {
+ evt_err("Failed to create sym session pool");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
+ "ca_sess_pool", nb_sessions, 0, 0,
+ sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
+ if (t->ca_sess_pool == NULL) {
+ evt_err("Failed to create sym session pool");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ max_session_size = 0;
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ unsigned int session_size;
+
+ session_size =
+ rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (session_size > max_session_size)
+ max_session_size = session_size;
+ }
+
+ max_session_size += sizeof(union rte_event_crypto_metadata);
+ t->ca_sess_priv_pool = rte_mempool_create(
+ "ca_sess_priv_pool", nb_sessions, max_session_size, 0, 0, NULL,
+ NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (t->ca_sess_priv_pool == NULL) {
+ evt_err("failed to create sym session private pool");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Calculate the number of queue pairs needed, based on the number of
+ * available producer lcores and crypto devices. For instance,
+ * if there are 4 cores and 2 crypto devices, 2 queue pairs will be set
+ * up per device.
+ */
+ nb_plcores = evt_nr_active_lcores(opt->plcores);
+ nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
+ nb_plcores / cdev_count;
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_qp_conf qp_conf;
+ struct rte_cryptodev_config conf;
+ struct rte_cryptodev_info info;
+ int qp_id;
+
+ rte_cryptodev_info_get(cdev_id, &info);
+ if (nb_qps > info.max_nb_queue_pairs) {
+ evt_err("Not enough queue pairs per cryptodev (%u)",
+ nb_qps);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ conf.nb_queue_pairs = nb_qps;
+ conf.socket_id = SOCKET_ID_ANY;
+ conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;
+
+ ret = rte_cryptodev_configure(cdev_id, &conf);
+ if (ret) {
+ evt_err("Failed to configure cryptodev (%u)", cdev_id);
+ goto err;
+ }
+
+ qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
+ qp_conf.mp_session = t->ca_sess_pool;
+ qp_conf.mp_session_private = t->ca_sess_priv_pool;
+
+ for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
+ ret = rte_cryptodev_queue_pair_setup(
+ cdev_id, qp_id, &qp_conf,
+ rte_cryptodev_socket_id(cdev_id));
+ if (ret) {
+ evt_err("Failed to setup queue pairs on cryptodev %u\n",
+ cdev_id);
+ goto err;
+ }
+ }
+ }
+
+ return 0;
+err:
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
+ rte_cryptodev_close(cdev_id);
+
+ rte_mempool_free(t->ca_op_pool);
+ rte_mempool_free(t->ca_sess_pool);
+ rte_mempool_free(t->ca_sess_priv_pool);
+ rte_mempool_free(t->ca_asym_sess_pool);
+
+ return ret;
+}
+
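+/* Tear down in reverse order of setup: free the per-flow sessions, detach
+ * queue pairs from the adapter, free the adapter, stop and close the
+ * cryptodevs, and release the pools.
+ */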
+void
+perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t cdev_id, cdev_count = rte_cryptodev_count();
+ struct test_perf *t = evt_test_priv(test);
+ uint16_t port;
+
+ if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
+ return;
+
+ for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+ uint32_t flow_id;
+
+ for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
+ if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ struct rte_cryptodev_sym_session *sess =
+ p->ca.crypto_sess[flow_id];
+
+ rte_cryptodev_sym_session_clear(p->ca.cdev_id, sess);
+ rte_cryptodev_sym_session_free(sess);
+ } else {
+ /* Asym sessions must be freed with the asym API. */
+ rte_cryptodev_asym_session_free(p->ca.cdev_id,
+ p->ca.crypto_sess[flow_id]);
+ }
+ }
+ rte_free(p->ca.crypto_sess);
+
+ rte_event_crypto_adapter_queue_pair_del(
+ TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
+ }
+
+ rte_event_crypto_adapter_free(TEST_PERF_CA_ID);
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ rte_cryptodev_stop(cdev_id);
+ rte_cryptodev_close(cdev_id);
+ }
+
+ rte_mempool_free(t->ca_op_pool);
+ rte_mempool_free(t->ca_sess_pool);
+ rte_mempool_free(t->ca_sess_priv_pool);
+ rte_mempool_free(t->ca_asym_sess_pool);
+}
+
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{