 };
 struct mc_scheduler_qp_ctx {
-	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-	uint32_t nb_slaves;
+	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	uint32_t nb_workers;
 	uint32_t last_enq_worker_idx;
 	uint32_t last_deq_worker_idx;
 schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+	struct rte_ring *order_ring =
+		((struct scheduler_qp_ctx *)qp)->order_ring;
 	struct rte_crypto_op *op;
-	uint32_t nb_objs = rte_ring_count(order_ring);
-	uint32_t nb_ops_to_deq = 0;
-	uint32_t nb_ops_deqd = 0;
-
-	if (nb_objs > nb_ops)
-		nb_objs = nb_ops;
+	uint32_t nb_objs, nb_ops_to_deq;
-	while (nb_ops_to_deq < nb_objs) {
-		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+			nb_ops, NULL);
+	if (nb_objs == 0)
+		return 0;
+	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+		op = ops[nb_ops_to_deq];
 		if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
 			break;
-
 		op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
-		nb_ops_to_deq++;
-	}
-
-	if (nb_ops_to_deq) {
-		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
-				(void **)ops, nb_ops_to_deq, NULL);
 	}
-	return nb_ops_deqd;
+	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+	return nb_ops_to_deq;
 }
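
The rewrite above swaps the count-then-bulk-dequeue sequence for the rte_ring "peek" API: rte_ring_dequeue_burst_start() copies out up to nb_ops objects and holds them reserved, and rte_ring_dequeue_finish() commits only the prefix that turned out to be complete, so unfinished ops stay in the order ring without a second traversal. A minimal standalone sketch of the same pattern, assuming a ring of int pointers where nonzero means "ready"; the is_ready() helper is illustrative, not part of the patch:

#include <rte_ring.h>
#include <rte_ring_peek.h>	/* peek API; experimental in older releases */

/* Illustrative stand-in for the CRYPTO_OP_STATUS_BIT_COMPLETE check. */
static int
is_ready(void *obj)
{
	return *(int *)obj != 0;
}

/* Dequeue only the leading run of ready objects; the rest stay queued.
 * The peek API requires a single-consumer (or HTS) ring, which the
 * order ring satisfies. */
static unsigned int
drain_ready(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int avail, i;

	avail = rte_ring_dequeue_burst_start(r, objs, n, NULL);
	for (i = 0; i < avail; i++)
		if (!is_ready(objs[i]))
			break;
	/* Commit the first i objects; objs[i..avail-1] are put back. */
	rte_ring_dequeue_finish(r, i);
	return i;
}
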
 static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
 static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
 	struct rte_ring *deq_ring;
 	uint32_t core_id = rte_lcore_id();
 	int i, worker_idx = -1;
-	struct scheduler_slave *slave;
+	struct scheduler_worker *worker;
 	struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
 	struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
 	uint16_t processed_ops;
 		return -1;
 	}
-	slave = &sched_ctx->slaves[worker_idx];
+	worker = &sched_ctx->workers[worker_idx];
 	enq_ring = mc_ctx->sched_enq_ring[worker_idx];
 	deq_ring = mc_ctx->sched_deq_ring[worker_idx];
 	while (!mc_ctx->stop_signal) {
 		if (pending_enq_ops) {
 			processed_ops =
-				rte_cryptodev_enqueue_burst(slave->dev_id,
-					slave->qp_id, &enq_ops[pending_enq_ops_idx],
+				rte_cryptodev_enqueue_burst(worker->dev_id,
+					worker->qp_id,
+					&enq_ops[pending_enq_ops_idx],
 					pending_enq_ops);
 			pending_enq_ops -= processed_ops;
 			pending_enq_ops_idx += processed_ops;
 				MC_SCHED_BUFFER_SIZE, NULL);
 			if (processed_ops) {
 				pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
-						slave->dev_id, slave->qp_id,
-						enq_ops, processed_ops);
+						worker->dev_id, worker->qp_id,
+						enq_ops, processed_ops);
 				pending_enq_ops = processed_ops - pending_enq_ops_idx;
 				inflight_ops += pending_enq_ops_idx;
 			}
 			pending_deq_ops -= processed_ops;
 			pending_deq_ops_idx += processed_ops;
 		} else if (inflight_ops) {
-			processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
-					slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+			processed_ops = rte_cryptodev_dequeue_burst(
+					worker->dev_id, worker->qp_id, deq_ops,
+					MC_SCHED_BUFFER_SIZE);
 			if (processed_ops) {
 				inflight_ops -= processed_ops;
 				if (reordering_enabled) {
 				qp_ctx->private_qp_ctx;
 		uint32_t j;
-		memset(mc_qp_ctx->slaves, 0,
-			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
-			sizeof(struct scheduler_slave));
-		for (j = 0; j < sched_ctx->nb_slaves; j++) {
-			mc_qp_ctx->slaves[j].dev_id =
-				sched_ctx->slaves[j].dev_id;
-			mc_qp_ctx->slaves[j].qp_id = i;
+		memset(mc_qp_ctx->workers, 0,
+			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
+			sizeof(struct scheduler_worker));
+		for (j = 0; j < sched_ctx->nb_workers; j++) {
+			mc_qp_ctx->workers[j].dev_id =
+				sched_ctx->workers[j].dev_id;
+			mc_qp_ctx->workers[j].qp_id = i;
 		}
-		mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+		mc_qp_ctx->nb_workers = sched_ctx->nb_workers;
 		mc_qp_ctx->last_enq_worker_idx = 0;
 		mc_qp_ctx->last_deq_worker_idx = 0;
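
For context, the last_enq_worker_idx / last_deq_worker_idx fields reset here drive a round-robin walk over the per-worker rings on the data path. A simplified sketch of the enqueue side, reusing this file's structures (the real scheduler also spills the remainder of a partial burst to the next worker's ring):

/* Simplified: hand the burst to one worker ring, then advance the
 * round-robin index so the next burst lands on the next worker. */
static uint16_t
rr_enqueue(struct mc_scheduler_ctx *mc_ctx,
		struct mc_scheduler_qp_ctx *mc_qp_ctx,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
	uint16_t processed = rte_ring_enqueue_burst(
			mc_ctx->sched_enq_ring[worker_idx],
			(void **)ops, nb_ops, NULL);

	if (++worker_idx == mc_qp_ctx->nb_workers)
		worker_idx = 0;
	mc_qp_ctx->last_enq_worker_idx = worker_idx;

	return processed;
}
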
 		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_enq_ring[i]) {
 			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
-					PER_SLAVE_BUFF_SIZE,
+					PER_WORKER_BUFF_SIZE,
 					rte_socket_id(),
 					RING_F_SC_DEQ | RING_F_SP_ENQ);
 			if (!mc_ctx->sched_enq_ring[i]) {
 		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_deq_ring[i]) {
 			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
-					PER_SLAVE_BUFF_SIZE,
+					PER_WORKER_BUFF_SIZE,
 					rte_socket_id(),
 					RING_F_SC_DEQ | RING_F_SP_ENQ);
 			if (!mc_ctx->sched_deq_ring[i]) {
 				return -1;
 			}
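
These per-worker ring pairs carry the handoff between the frontend and the worker lcores, which is why RING_F_SP_ENQ | RING_F_SC_DEQ is safe: exactly one thread produces and one consumes on each ring. A condensed sketch of the worker-side loop that the fragments above implement, using the same variables (pending/partial-burst bookkeeping omitted):

/* Condensed worker loop: pull ops handed over by the frontend, run
 * them on this core's crypto device, and return the results. */
while (!mc_ctx->stop_signal) {
	uint16_t n = rte_ring_dequeue_burst(enq_ring, (void **)enq_ops,
			MC_SCHED_BUFFER_SIZE, NULL);
	if (n)
		rte_cryptodev_enqueue_burst(worker->dev_id, worker->qp_id,
				enq_ops, n);

	n = rte_cryptodev_dequeue_burst(worker->dev_id, worker->qp_id,
			deq_ops, MC_SCHED_BUFFER_SIZE);
	if (n)
		rte_ring_enqueue_burst(deq_ring, (void **)deq_ops, n, NULL);
}
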
-struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
-	slave_attach,
-	slave_detach,
+static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+	worker_attach,
+	worker_detach,
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
 	NULL /* option_get */
 };
-struct rte_cryptodev_scheduler mc_scheduler = {
+static struct rte_cryptodev_scheduler mc_scheduler = {
 	.name = "multicore-scheduler",
 	.description = "scheduler which will run burst across multiple cpu cores",
 	.mode = CDEV_SCHED_MODE_MULTICORE,
 	.ops = &scheduler_mc_ops
 };
-struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;
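
With the rename, the symbol exported to the scheduler PMD core becomes crypto_scheduler_multicore, and the public API follows suit: rte_cryptodev_scheduler_worker_attach() replaces the old _slave_ variant. A hedged usage sketch; both device IDs are illustrative, and the worker lcores themselves come from the scheduler vdev's arguments:

#include <rte_cryptodev_scheduler.h>

/* Attach one crypto device as a worker, then switch the scheduler
 * (a crypto_scheduler vdev) into multicore mode. */
static int
enable_multicore(uint8_t scheduler_id, uint8_t worker_id)
{
	if (rte_cryptodev_scheduler_worker_attach(scheduler_id,
			worker_id) < 0)
		return -1;

	return rte_cryptodev_scheduler_mode_set(scheduler_id,
			CDEV_SCHED_MODE_MULTICORE);
}
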