-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <unistd.h>
#define MC_SCHED_BUFFER_SIZE 32
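+/* RTE_CRYPTO_OP_STATUS_* values are small, so the top bit of op->status is
+ * free; it is borrowed here to flag ops the worker cores have completed.
+ */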
+#define CRYPTO_OP_STATUS_BIT_COMPLETE 0x80
+
/** multi-core scheduler context */
struct mc_scheduler_ctx {
uint32_t num_workers; /**< Number of workers polling */
uint32_t stop_signal;
- struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
- struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+ struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
};
struct mc_scheduler_qp_ctx {
- struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
- uint32_t nb_slaves;
+ struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+ uint32_t nb_workers;
uint32_t last_enq_worker_idx;
uint32_t last_deq_worker_idx;
uint16_t nb_ops)
{
struct rte_ring *order_ring =
- ((struct scheduler_qp_ctx *)qp)->order_ring;
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ struct rte_crypto_op *op;
+ uint32_t nb_objs, nb_ops_to_deq;
+
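+ /* Peek at the order ring: ops are copied out, but the dequeue is not
+ * committed until rte_ring_dequeue_finish() below.
+ */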
+ nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+ nb_ops, NULL);
+ if (nb_objs == 0)
+ return 0;
- return scheduler_order_drain(order_ring, ops, nb_ops);
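+ /* Release only the leading run of completed ops, so ops still being
+ * processed by a worker core never overtake earlier ones.
+ */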
+ for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+ op = ops[nb_ops_to_deq];
+ if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
+ break;
+ op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
+ }
+
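+ /* Commit just the completed prefix; the rest stay on the order ring. */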
+ rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+ return nb_ops_to_deq;
}
static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
struct rte_ring *deq_ring;
uint32_t core_id = rte_lcore_id();
int i, worker_idx = -1;
- struct scheduler_slave *slave;
+ struct scheduler_worker *worker;
struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
uint16_t processed_ops;
- uint16_t left_op = 0;
- uint16_t left_op_idx = 0;
+ uint16_t pending_enq_ops = 0;
+ uint16_t pending_enq_ops_idx = 0;
+ uint16_t pending_deq_ops = 0;
+ uint16_t pending_deq_ops_idx = 0;
uint16_t inflight_ops = 0;
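+ /* The reordering mode is fixed before the workers are launched, so it
+ * can be read once outside the loop.
+ */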
+ const uint8_t reordering_enabled = sched_ctx->reordering_enabled;
for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
if (sched_ctx->wc_pool[i] == core_id) {
worker_idx = i;
break;
}
}
if (worker_idx == -1) {
- CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+ CR_SCHED_LOG(ERR, "worker on core %u: cannot find worker index",
+ core_id);
return -1;
}
- slave = &sched_ctx->slaves[worker_idx];
+ worker = &sched_ctx->workers[worker_idx];
enq_ring = mc_ctx->sched_enq_ring[worker_idx];
deq_ring = mc_ctx->sched_deq_ring[worker_idx];
while (!mc_ctx->stop_signal) {
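+ /* Retry ops the device refused last iteration before pulling new ones
+ * from the enqueue ring, so submission order is kept.
+ */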
- if (left_op) {
+ if (pending_enq_ops) {
processed_ops =
- rte_cryptodev_enqueue_burst(slave->dev_id,
- slave->qp_id,
- &enq_ops[left_op_idx], left_op);
-
- left_op -= processed_ops;
- left_op_idx += processed_ops;
+ rte_cryptodev_enqueue_burst(worker->dev_id,
+ worker->qp_id,
+ &enq_ops[pending_enq_ops_idx],
+ pending_enq_ops);
+ pending_enq_ops -= processed_ops;
+ pending_enq_ops_idx += processed_ops;
+ inflight_ops += processed_ops;
} else {
- uint16_t nb_deq_ops = rte_ring_dequeue_burst(enq_ring,
- (void *)enq_ops, MC_SCHED_BUFFER_SIZE, NULL);
- if (nb_deq_ops) {
- processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
- slave->qp_id, enq_ops, nb_deq_ops);
-
- if (unlikely(processed_ops < nb_deq_ops)) {
- left_op = nb_deq_ops - processed_ops;
- left_op_idx = processed_ops;
- }
-
- inflight_ops += processed_ops;
+ processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
+ MC_SCHED_BUFFER_SIZE, NULL);
+ if (processed_ops) {
+ pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
+ worker->dev_id, worker->qp_id,
+ enq_ops, processed_ops);
+ pending_enq_ops = processed_ops - pending_enq_ops_idx;
+ inflight_ops += pending_enq_ops_idx;
}
}
- if (inflight_ops > 0) {
- processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
- slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
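+ /* Flush ops that did not fit on the dequeue ring last iteration before
+ * polling the device for more completions.
+ */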
+ if (pending_deq_ops) {
+ processed_ops = rte_ring_enqueue_burst(
+ deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
+ pending_deq_ops, NULL);
+ pending_deq_ops -= processed_ops;
+ pending_deq_ops_idx += processed_ops;
+ } else if (inflight_ops) {
+ processed_ops = rte_cryptodev_dequeue_burst(
+ worker->dev_id, worker->qp_id, deq_ops,
+ MC_SCHED_BUFFER_SIZE);
if (processed_ops) {
- uint16_t nb_enq_ops = rte_ring_enqueue_burst(deq_ring,
- (void *)deq_ops, processed_ops, NULL);
- inflight_ops -= nb_enq_ops;
+ inflight_ops -= processed_ops;
+ if (reordering_enabled) {
+ uint16_t j;
+
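+ /* With reordering on, completed ops stay on the order ring; flag them
+ * so the ordered dequeue path above can release them in order.
+ */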
+ for (j = 0; j < processed_ops; j++) {
+ deq_ops[j]->status |=
+ CRYPTO_OP_STATUS_BIT_COMPLETE;
+ }
+ } else {
+ pending_deq_ops_idx = rte_ring_enqueue_burst(
+ deq_ring, (void *)deq_ops, processed_ops,
+ NULL);
+ pending_deq_ops = processed_ops -
+ pending_deq_ops_idx;
+ }
}
}
qp_ctx->private_qp_ctx;
uint32_t j;
- memset(mc_qp_ctx->slaves, 0,
- RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
- sizeof(struct scheduler_slave));
- for (j = 0; j < sched_ctx->nb_slaves; j++) {
- mc_qp_ctx->slaves[j].dev_id =
- sched_ctx->slaves[j].dev_id;
- mc_qp_ctx->slaves[j].qp_id = i;
+ memset(mc_qp_ctx->workers, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
+ sizeof(struct scheduler_worker));
+ for (j = 0; j < sched_ctx->nb_workers; j++) {
+ mc_qp_ctx->workers[j].dev_id =
+ sched_ctx->workers[j].dev_id;
+ mc_qp_ctx->workers[j].qp_id = i;
}
- mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+ mc_qp_ctx->nb_workers = sched_ctx->nb_workers;
mc_qp_ctx->last_enq_worker_idx = 0;
mc_qp_ctx->last_deq_worker_idx = 0;
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
mc_ctx->stop_signal = 1;
- for (uint16_t i = 0; i < sched_ctx->nb_wc; i++)
+ for (i = 0; i < sched_ctx->nb_wc; i++)
rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
return 0;
mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
rte_socket_id());
if (!mc_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
- struct mc_scheduler_ctx *mc_ctx;
+ struct mc_scheduler_ctx *mc_ctx = NULL;
+ uint16_t i;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
rte_socket_id());
if (!mc_ctx) {
- CS_LOG_ERR("failed allocate memory");
+ CR_SCHED_LOG(ERR, "failed allocate memory");
return -ENOMEM;
}
mc_ctx->num_workers = sched_ctx->nb_wc;
- for (uint16_t i = 0; i < sched_ctx->nb_wc; i++) {
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
char r_name[16];
- snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
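+ /* Ring names now include the device ID so several scheduler instances
+ * can coexist; look up first so a restarted device reuses rings that
+ * already exist.
+ */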
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_enq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+ PER_WORKER_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
- snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_deq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+ PER_WORKER_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
}
sched_ctx->private_ctx = (void *)mc_ctx;
return 0;
+
+exit:
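+ /* Free whatever rings were created before the failure; rte_ring_free()
+ * accepts NULL for slots that were never allocated.
+ */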
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ rte_ring_free(mc_ctx->sched_enq_ring[i]);
+ rte_ring_free(mc_ctx->sched_deq_ring[i]);
+ }
+ rte_free(mc_ctx);
+
+ return -1;
}
-struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
- slave_attach,
- slave_detach,
+static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+ worker_attach,
+ worker_detach,
scheduler_start,
scheduler_stop,
scheduler_config_qp,
scheduler_create_private_ctx,
NULL, /* option_set */
NULL /* option_get */
};
-struct rte_cryptodev_scheduler mc_scheduler = {
+static struct rte_cryptodev_scheduler mc_scheduler = {
.name = "multicore-scheduler",
.description = "scheduler which will run burst across multiple cpu cores",
.mode = CDEV_SCHED_MODE_MULTICORE,
.ops = &scheduler_mc_ops
};
-struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;