/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <unistd.h>

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
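
/*
 * Multi-core scheduling mode: the application-facing enqueue/dequeue calls
 * only exchange ops with per-core software rings, while dedicated worker
 * lcores (selected through the scheduler's "corelist" device option) drive
 * the attached crypto devices.
 */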
#define MC_SCHED_ENQ_RING_NAME_PREFIX	"MCS_ENQR_"
#define MC_SCHED_DEQ_RING_NAME_PREFIX	"MCS_DEQR_"

#define MC_SCHED_BUFFER_SIZE 32
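
/* Borrow the unused high bit of rte_crypto_op.status to mark an op as
 * processed while it waits in the order ring; the bit is cleared before
 * the op is handed back to the application.
 */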
#define CRYPTO_OP_STATUS_BIT_COMPLETE	0x80

/** multi-core scheduler context */
struct mc_scheduler_ctx {
	uint32_t num_workers;             /**< Number of workers polling */
	uint32_t stop_signal;

	struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
	struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
};
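
/** multi-core scheduler queue pair context */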
struct mc_scheduler_qp_ctx {
	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
	uint32_t nb_workers;

	uint32_t last_enq_worker_idx;
	uint32_t last_deq_worker_idx;

	struct mc_scheduler_ctx *mc_private_ctx;
};
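
/*
 * Frontend enqueue: spread the burst round-robin over the per-core enqueue
 * rings; the worker lcores drain these rings in mc_scheduler_worker().
 */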
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct mc_scheduler_qp_ctx *mc_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
	uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
	uint16_t i, processed_ops = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
		struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
		uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
			(void *)(&ops[processed_ops]), nb_ops, NULL);

		nb_ops -= nb_queue_ops;
		processed_ops += nb_queue_ops;

		if (++worker_idx == mc_ctx->num_workers)
			worker_idx = 0;
	}
	mc_qp_ctx->last_enq_worker_idx = worker_idx;

	return processed_ops;
}
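
/*
 * Ordered frontend enqueue: cap the burst to the free space in the order
 * ring and record every enqueued op there, so that dequeue can return ops
 * in their original submission order.
 */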
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}
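
/*
 * Frontend dequeue: collect completed ops round-robin from the per-core
 * dequeue rings filled by the worker lcores.
 */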
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct mc_scheduler_qp_ctx *mc_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
	uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
	uint16_t i, processed_ops = 0;

	for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
		struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
		uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
			(void *)(&ops[processed_ops]), nb_ops, NULL);

		nb_ops -= nb_deq_ops;
		processed_ops += nb_deq_ops;
		if (++worker_idx == mc_ctx->num_workers)
			worker_idx = 0;
	}

	mc_qp_ctx->last_deq_worker_idx = worker_idx;

	return processed_ops;
}
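
/*
 * Ordered frontend dequeue: peek at the order ring and release only the
 * leading run of ops already flagged complete, clearing the completion bit
 * on the way out.
 */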
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	struct rte_crypto_op *op;
	uint32_t nb_objs, nb_ops_to_deq;

	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
		nb_ops, NULL);
	if (nb_objs == 0)
		return 0;

	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
		op = ops[nb_ops_to_deq];
		if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
			break;
		op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
	}

	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
	return nb_ops_to_deq;
}
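
/* Nothing to do when a worker device is attached or detached in this mode. */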
static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t worker_id)
{
	return 0;
}

static int
worker_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t worker_id)
{
	return 0;
}
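
/*
 * Body of each worker lcore: pull ops from this core's enqueue ring, feed
 * them to the attached worker device, and push finished ops to the dequeue
 * ring. Bursts that a device or ring cannot fully absorb are kept pending
 * and retried first on the next iteration.
 */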
static int
mc_scheduler_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	struct rte_ring *enq_ring;
	struct rte_ring *deq_ring;
	uint32_t core_id = rte_lcore_id();
	int i, worker_idx = -1;
	struct scheduler_worker *worker;
	struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
	struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
	uint16_t processed_ops;
	uint16_t pending_enq_ops = 0;
	uint16_t pending_enq_ops_idx = 0;
	uint16_t pending_deq_ops = 0;
	uint16_t pending_deq_ops_idx = 0;
	uint16_t inflight_ops = 0;
	const uint8_t reordering_enabled = sched_ctx->reordering_enabled;

	/* Map this lcore to its slot in the worker-core pool. */
	for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
		if (sched_ctx->wc_pool[i] == core_id) {
			worker_idx = i;
			break;
		}
	}
	if (worker_idx == -1) {
		CR_SCHED_LOG(ERR, "worker on core %u: cannot find worker index!",
				core_id);
		return -1;
	}

	worker = &sched_ctx->workers[worker_idx];
	enq_ring = mc_ctx->sched_enq_ring[worker_idx];
	deq_ring = mc_ctx->sched_deq_ring[worker_idx];

	while (!mc_ctx->stop_signal) {
		if (pending_enq_ops) {
			/* Retry ops the device did not accept last time. */
			processed_ops =
				rte_cryptodev_enqueue_burst(worker->dev_id,
					worker->qp_id,
					&enq_ops[pending_enq_ops_idx],
					pending_enq_ops);
			pending_enq_ops -= processed_ops;
			pending_enq_ops_idx += processed_ops;
			inflight_ops += processed_ops;
		} else {
			processed_ops = rte_ring_dequeue_burst(enq_ring,
					(void *)enq_ops,
					MC_SCHED_BUFFER_SIZE, NULL);
			if (processed_ops) {
				pending_enq_ops_idx =
					rte_cryptodev_enqueue_burst(
						worker->dev_id, worker->qp_id,
						enq_ops, processed_ops);
				pending_enq_ops = processed_ops -
						pending_enq_ops_idx;
				inflight_ops += pending_enq_ops_idx;
			}
		}

		if (pending_deq_ops) {
			/* Retry ops the dequeue ring could not take. */
			processed_ops = rte_ring_enqueue_burst(
					deq_ring,
					(void *)&deq_ops[pending_deq_ops_idx],
					pending_deq_ops, NULL);
			pending_deq_ops -= processed_ops;
			pending_deq_ops_idx += processed_ops;
		} else if (inflight_ops) {
			processed_ops = rte_cryptodev_dequeue_burst(
					worker->dev_id, worker->qp_id, deq_ops,
					MC_SCHED_BUFFER_SIZE);
			if (processed_ops) {
				inflight_ops -= processed_ops;
				if (reordering_enabled) {
					uint16_t j;

					/* Mark completion for the ordered
					 * dequeue path.
					 */
					for (j = 0; j < processed_ops; j++) {
						deq_ops[j]->status |=
						CRYPTO_OP_STATUS_BIT_COMPLETE;
					}
				}
				pending_deq_ops_idx = rte_ring_enqueue_burst(
						deq_ring, (void *)deq_ops,
						processed_ops, NULL);
				pending_deq_ops = processed_ops -
						pending_deq_ops_idx;
			}
		}

		rte_pause();
	}

	return 0;
}
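
/*
 * Start: launch one worker loop per configured lcore and install the
 * enqueue/dequeue entry points matching the reordering setting.
 */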
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	uint16_t i;

	mc_ctx->stop_signal = 0;

	for (i = 0; i < sched_ctx->nb_wc; i++)
		rte_eal_remote_launch(
			(lcore_function_t *)mc_scheduler_worker, dev,
			sched_ctx->wc_pool[i]);

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct mc_scheduler_qp_ctx *mc_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(mc_qp_ctx->workers, 0,
				RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
				sizeof(struct scheduler_worker));
		for (j = 0; j < sched_ctx->nb_workers; j++) {
			mc_qp_ctx->workers[j].dev_id =
					sched_ctx->workers[j].dev_id;
			mc_qp_ctx->workers[j].qp_id = i;
		}

		mc_qp_ctx->nb_workers = sched_ctx->nb_workers;

		mc_qp_ctx->last_enq_worker_idx = 0;
		mc_qp_ctx->last_deq_worker_idx = 0;
	}

	return 0;
}
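
/* Stop: signal the worker lcores to exit and wait for them to return. */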
static int
scheduler_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
	uint16_t i;

	mc_ctx->stop_signal = 1;

	for (i = 0; i < sched_ctx->nb_wc; i++)
		rte_eal_wait_lcore(sched_ctx->wc_pool[i]);

	return 0;
}
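
/* Allocate the per queue pair context and link it to the scheduler. */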
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct mc_scheduler_qp_ctx *mc_qp_ctx;
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;

	mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
			rte_socket_id());
	if (!mc_qp_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	mc_qp_ctx->mc_private_ctx = mc_ctx;
	qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;

	return 0;
}
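
/*
 * Allocate the scheduler-wide context and create (or, after a reconfigure,
 * look up) one enqueue and one dequeue ring per worker lcore.
 */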
static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct mc_scheduler_ctx *mc_ctx = NULL;
	uint16_t i;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
			rte_socket_id());
	if (!mc_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory");
		return -ENOMEM;
	}

	mc_ctx->num_workers = sched_ctx->nb_wc;
	for (i = 0; i < sched_ctx->nb_wc; i++) {
		char r_name[16];

		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
				"%u_%u", dev->data->dev_id, i);
		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
		if (!mc_ctx->sched_enq_ring[i]) {
			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
					PER_WORKER_BUFF_SIZE,
					rte_socket_id(),
					RING_F_SC_DEQ | RING_F_SP_ENQ);
			if (!mc_ctx->sched_enq_ring[i]) {
				CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
						i);
				goto exit;
			}
		}
		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
				"%u_%u", dev->data->dev_id, i);
		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
		if (!mc_ctx->sched_deq_ring[i]) {
			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
					PER_WORKER_BUFF_SIZE,
					rte_socket_id(),
					RING_F_SC_DEQ | RING_F_SP_ENQ);
			if (!mc_ctx->sched_deq_ring[i]) {
				CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
						i);
				goto exit;
			}
		}
	}

	sched_ctx->private_ctx = (void *)mc_ctx;

	return 0;

exit:
	for (i = 0; i < sched_ctx->nb_wc; i++) {
		rte_ring_free(mc_ctx->sched_enq_ring[i]);
		rte_ring_free(mc_ctx->sched_deq_ring[i]);
	}
	rte_free(mc_ctx);

	return -1;
}
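
/* Entry points registered with the generic scheduler framework. */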
static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
	worker_attach,
	worker_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

static struct rte_cryptodev_scheduler mc_scheduler = {
	.name = "multicore-scheduler",
	.description = "scheduler which runs bursts across multiple CPU cores",
	.mode = CDEV_SCHED_MODE_MULTICORE,
	.ops = &scheduler_mc_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;