-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
struct psd_scheduler_qp_ctx {
struct scheduler_slave primary_slave;
struct scheduler_slave secondary_slave;
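+ /* packet-size threshold kept in mask form, ~(threshold - 1);
+ * see scheduler_option_set()
+ */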
uint32_t threshold;
- uint32_t max_nb_objs;
uint8_t deq_idx;
} __rte_cache_aligned;
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
- struct psd_scheduler_qp_ctx *qp_ctx =
- ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
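+ /* keep both views of the queue pair: the generic ctx still owns
+ * max_nb_objs, while the PSD-private ctx holds the slaves and the
+ * threshold mask
+ */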
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
- struct scheduler_session *sess;
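+ /* snapshot of each slave's in-flight op count; enqueuing stops once
+ * pos + in-flight would reach the queue capacity (max_nb_objs)
+ */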
uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
- qp_ctx->primary_slave.nb_inflight_cops,
- qp_ctx->secondary_slave.nb_inflight_cops
+ psd_qp_ctx->primary_slave.nb_inflight_cops,
+ psd_qp_ctx->secondary_slave.nb_inflight_cops
};
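+ /* one accumulator per slave: the slave index plus the count of ops
+ * staged for it in sched_ops[] so far
+ */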
struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
};
rte_prefetch0(ops[i + 7]->sym);
rte_prefetch0(ops[i + 7]->sym->session);
- sess = (struct scheduler_session *)
- ops[i]->sym->session->_private;
/* job_len is initialized as the cipher data length; when that
 * is 0, it falls back to the auth data length
 */
job_len = ops[i]->sym->cipher.data.length;
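+ /* branchless select: (cipher length == 0) evaluates to 0 or 1, so
+ * the multiply below adds the auth length only when there is no
+ * cipher data
+ */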
job_len += (ops[i]->sym->cipher.data.length == 0) *
ops[i]->sym->auth.data.length;
/* decide the target op based on the job length */
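+ /* the threshold is a mask, ~(T - 1): job_len & mask is nonzero
+ * exactly when job_len >= T, so large jobs go to index 0 (primary)
+ * and small jobs to index 1 (secondary)
+ */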
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
/* stop scheduling cops before the queue is full; this
 * prevents the enqueue from failing
 */
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
	i = nb_ops;
	break;
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
- ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+1]->sym->session->_private;
job_len = ops[i+1]->sym->cipher.data.length;
job_len += (ops[i+1]->sym->cipher.data.length == 0) *
ops[i+1]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
	i = nb_ops;
	break;
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
- ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+2]->sym->session->_private;
job_len = ops[i+2]->sym->cipher.data.length;
job_len += (ops[i+2]->sym->cipher.data.length == 0) *
ops[i+2]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
	i = nb_ops;
	break;
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
- ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+3]->sym->session->_private;
-
job_len = ops[i+3]->sym->cipher.data.length;
job_len += (ops[i+3]->sym->cipher.data.length == 0) *
ops[i+3]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
	i = nb_ops;
	break;
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
- ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
}
for (; i < nb_ops; i++) {
- sess = (struct scheduler_session *)
- ops[i]->sym->session->_private;
-
job_len = ops[i]->sym->cipher.data.length;
job_len += (ops[i]->sym->cipher.data.length == 0) *
ops[i]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
	i = nb_ops;
	break;
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
- ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
}
processed_ops_pri = rte_cryptodev_enqueue_burst(
- qp_ctx->primary_slave.dev_id,
- qp_ctx->primary_slave.qp_id,
+ psd_qp_ctx->primary_slave.dev_id,
+ psd_qp_ctx->primary_slave.qp_id,
sched_ops[PRIMARY_SLAVE_IDX],
enq_ops[PRIMARY_SLAVE_IDX].pos);
/* enqueue shall not fail as the slave queue is monitored */
RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
- qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+ psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
processed_ops_sec = rte_cryptodev_enqueue_burst(
- qp_ctx->secondary_slave.dev_id,
- qp_ctx->secondary_slave.qp_id,
+ psd_qp_ctx->secondary_slave.dev_id,
+ psd_qp_ctx->secondary_slave.qp_id,
sched_ops[SECONDARY_SLAVE_IDX],
enq_ops[SECONDARY_SLAVE_IDX].pos);
RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
- qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+ psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
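+ /* both in-flight counters are decremented again on the dequeue path */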
return processed_ops_pri + processed_ops_sec;
}
/* for the packet size based scheduler, nb_slaves has to be >= 2 */
if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
- CS_LOG_ERR("not enough slaves to start");
+ CR_SCHED_LOG(ERR, "not enough slaves to start");
return -1;
}
ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
ps_qp_ctx->threshold = psd_ctx->threshold;
-
- ps_qp_ctx->max_nb_objs = sched_ctx->qp_conf.nb_descriptors;
}
if (sched_ctx->reordering_enabled) {
if (ps_qp_ctx->primary_slave.nb_inflight_cops +
ps_qp_ctx->secondary_slave.nb_inflight_cops) {
- CS_LOG_ERR("Some crypto ops left in slave queue");
+ CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
return -1;
}
}
ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
rte_socket_id());
if (!ps_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct psd_scheduler_ctx *psd_ctx;
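+ /* free any context left over from a previous configure and clear the
+ * pointer so a failed re-allocation below cannot leave it dangling
+ */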
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
rte_socket_id());
if (!psd_ctx) {
- CS_LOG_ERR("failed allocate memory");
+ CR_SCHED_LOG(ERR, "failed allocate memory");
return -ENOMEM;
}
return 0;
}
+static int
+scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ uint32_t threshold;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CR_SCHED_LOG(ERR, "Option not supported");
+ return -EINVAL;
+ }
+
+ threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
+ option)->threshold;
+ if (!rte_is_power_of_2(threshold)) {
+ CR_SCHED_LOG(ERR, "Threshold is not power of 2");
+ return -EINVAL;
+ }
+
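+ /* store the threshold in mask form: for a power-of-2 threshold T,
+ * ~(T - 1) clears the low log2(T) bits, e.g. T = 64 maps to
+ * 0xffffffc0, and (job_len & mask) != 0 iff job_len >= T
+ */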
+ psd_ctx->threshold = ~(threshold - 1);
+
+ return 0;
+}
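+
+/* A minimal usage sketch (scheduler_id is assumed to be the id of an
+ * already-configured scheduler device): an application reaches this
+ * callback through the public API, e.g.
+ *
+ *	struct rte_cryptodev_scheduler_threshold_option opt = {
+ *		.threshold = 128,
+ *	};
+ *	rte_cryptodev_scheduler_option_set(scheduler_id,
+ *			CDEV_SCHED_OPTION_THRESHOLD, &opt);
+ *
+ * 128 is a power of 2, so it passes the check above and is stored as
+ * the mask 0xffffff80.
+ */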
+
+static int
+scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ struct rte_cryptodev_scheduler_threshold_option *threshold_option;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CR_SCHED_LOG(ERR, "Option not supported");
+ return -EINVAL;
+ }
+
+ threshold_option = option;
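+ /* undo the mask transform: ~mask + 1 == ~(~(T - 1)) + 1
+ * == (T - 1) + 1 == T
+ */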
+ threshold_option->threshold = (~psd_ctx->threshold) + 1;
+
+ return 0;
+}
-struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
slave_attach,
slave_detach,
scheduler_start,
scheduler_stop,
scheduler_config_qp,
scheduler_create_private_ctx,
+ scheduler_option_set,
+ scheduler_option_get
};
-struct rte_cryptodev_scheduler psd_scheduler = {
+static struct rte_cryptodev_scheduler psd_scheduler = {
.name = "packet-size-based-scheduler",
.description = "scheduler which will distribute crypto op "
"burst based on the packet size",
.ops = &scheduler_ps_ops
};
-struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr = &psd_scheduler;