/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
#define MC_SCHED_ENQ_RING_NAME_PREFIX   "MCS_ENQR_"
#define MC_SCHED_DEQ_RING_NAME_PREFIX   "MCS_DEQR_"

#define MC_SCHED_BUFFER_SIZE            32

#define CRYPTO_OP_STATUS_BIT_COMPLETE   0x80
/** multi-core scheduler context */
struct mc_scheduler_ctx {
        uint32_t num_workers;   /**< Number of workers polling */
        uint32_t stop_signal;

        struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
        struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
};
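
/** multi-core scheduler queue pair context */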
struct mc_scheduler_qp_ctx {
        struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
        uint32_t nb_slaves;

        uint32_t last_enq_worker_idx;
        uint32_t last_deq_worker_idx;

        struct mc_scheduler_ctx *mc_private_ctx;
};
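
/**
 * Frontend enqueue: spread the burst round-robin over the per-worker
 * enqueue rings; ops that do not fit any ring are reported as not enqueued.
 */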
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct mc_scheduler_qp_ctx *mc_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
        uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
        uint16_t i, processed_ops = 0;

        if (unlikely(nb_ops == 0))
                return 0;

        for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
                struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
                uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
                        (void *)(&ops[processed_ops]), nb_ops, NULL);

                nb_ops -= nb_queue_ops;
                processed_ops += nb_queue_ops;

                if (++worker_idx == mc_ctx->num_workers)
                        worker_idx = 0;
        }

        mc_qp_ctx->last_enq_worker_idx = worker_idx;

        return processed_ops;
}
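
/**
 * Ordered frontend enqueue: cap the burst by the free space in the order
 * ring, enqueue as usual, then record the ops for in-order dequeue.
 */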
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;
        uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
                        nb_ops);
        uint16_t nb_ops_enqd = schedule_enqueue(qp, ops, nb_ops_to_enq);

        scheduler_order_insert(order_ring, ops, nb_ops_enqd);

        return nb_ops_enqd;
}
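
/**
 * Frontend dequeue: collect completed ops round-robin from the per-worker
 * dequeue rings.
 */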
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct mc_scheduler_qp_ctx *mc_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
        uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
        uint16_t i, processed_ops = 0;

        for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
                struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
                uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
                        (void *)(&ops[processed_ops]), nb_ops, NULL);

                nb_ops -= nb_deq_ops;
                processed_ops += nb_deq_ops;
                if (++worker_idx == mc_ctx->num_workers)
                        worker_idx = 0;
        }

        mc_qp_ctx->last_deq_worker_idx = worker_idx;

        return processed_ops;
}
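
/**
 * Ordered frontend dequeue: release ops from the order ring only up to the
 * first one the workers have not yet marked complete, preserving the
 * original enqueue order.
 */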
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
        struct rte_crypto_op *op;
        uint32_t nb_objs = rte_ring_count(order_ring);
        uint32_t nb_ops_to_deq = 0;
        uint32_t nb_ops_deqd = 0;

        if (nb_objs > nb_ops)
                nb_objs = nb_ops;

        while (nb_ops_to_deq < nb_objs) {
                SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);

                if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
                        break;

                op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
                nb_ops_to_deq++;
        }

        if (nb_ops_to_deq) {
                nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
                                (void **)ops, nb_ops_to_deq, NULL);
        }

        return nb_ops_deqd;
}
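
/** slave attach/detach need no mode-specific work in this scheduler */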
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}
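
/**
 * Per-lcore worker loop: pull ops from this worker's enqueue ring, submit
 * them to the slave device, then fetch completions and either flag them
 * complete (reordering enabled) or push them to the dequeue ring.
 */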
static int
mc_scheduler_worker(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
        struct rte_ring *enq_ring;
        struct rte_ring *deq_ring;
        uint32_t core_id = rte_lcore_id();
        int i, worker_idx = -1;
        struct scheduler_slave *slave;
        struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
        struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
        uint16_t processed_ops;
        uint16_t pending_enq_ops = 0;
        uint16_t pending_enq_ops_idx = 0;
        uint16_t pending_deq_ops = 0;
        uint16_t pending_deq_ops_idx = 0;
        uint16_t inflight_ops = 0;
        const uint8_t reordering_enabled = sched_ctx->reordering_enabled;

        for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
                if (sched_ctx->wc_pool[i] == core_id) {
                        worker_idx = i;
                        break;
                }
        }
        if (worker_idx == -1) {
                CS_LOG_ERR("worker on core %u: cannot find worker index!\n",
                                core_id);
                return -1;
        }

        slave = &sched_ctx->slaves[worker_idx];
        enq_ring = mc_ctx->sched_enq_ring[worker_idx];
        deq_ring = mc_ctx->sched_deq_ring[worker_idx];

        while (!mc_ctx->stop_signal) {
                if (pending_enq_ops) {
                        processed_ops =
                                rte_cryptodev_enqueue_burst(slave->dev_id,
                                        slave->qp_id, &enq_ops[pending_enq_ops_idx],
                                        pending_enq_ops);
                        pending_enq_ops -= processed_ops;
                        pending_enq_ops_idx += processed_ops;
                        inflight_ops += processed_ops;
                } else {
                        processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
                                        MC_SCHED_BUFFER_SIZE, NULL);
                        if (processed_ops) {
                                pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
                                                slave->dev_id, slave->qp_id,
                                                enq_ops, processed_ops);
                                pending_enq_ops = processed_ops - pending_enq_ops_idx;
                                inflight_ops += pending_enq_ops_idx;
                        }
                }

                if (pending_deq_ops) {
                        processed_ops = rte_ring_enqueue_burst(
                                        deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
                                        pending_deq_ops, NULL);
                        pending_deq_ops -= processed_ops;
                        pending_deq_ops_idx += processed_ops;
                } else if (inflight_ops) {
                        processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
                                        slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
                        if (processed_ops) {
                                inflight_ops -= processed_ops;
                                if (reordering_enabled) {
                                        uint16_t j;

                                        for (j = 0; j < processed_ops; j++) {
                                                deq_ops[j]->status |=
                                                        CRYPTO_OP_STATUS_BIT_COMPLETE;
                                        }
                                } else {
                                        pending_deq_ops_idx = rte_ring_enqueue_burst(
                                                        deq_ring, (void *)deq_ops,
                                                        processed_ops, NULL);
                                        pending_deq_ops = processed_ops -
                                                        pending_deq_ops_idx;
                                }
                        }
                }

                rte_pause();
        }

        return 0;
}
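
/** launch one worker per configured lcore and install the burst handlers */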
static int
scheduler_start(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
        uint16_t i;

        mc_ctx->stop_signal = 0;

        for (i = 0; i < sched_ctx->nb_wc; i++)
                rte_eal_remote_launch(
                        (lcore_function_t *)mc_scheduler_worker, dev,
                        sched_ctx->wc_pool[i]);

        if (sched_ctx->reordering_enabled) {
                dev->enqueue_burst = &schedule_enqueue_ordering;
                dev->dequeue_burst = &schedule_dequeue_ordering;
        } else {
                dev->enqueue_burst = &schedule_enqueue;
                dev->dequeue_burst = &schedule_dequeue;
        }

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
                struct mc_scheduler_qp_ctx *mc_qp_ctx =
                                qp_ctx->private_qp_ctx;
                uint32_t j;

                memset(mc_qp_ctx->slaves, 0,
                                RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
                                sizeof(struct scheduler_slave));
                for (j = 0; j < sched_ctx->nb_slaves; j++) {
                        mc_qp_ctx->slaves[j].dev_id =
                                        sched_ctx->slaves[j].dev_id;
                        mc_qp_ctx->slaves[j].qp_id = i;
                }

                mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

                mc_qp_ctx->last_enq_worker_idx = 0;
                mc_qp_ctx->last_deq_worker_idx = 0;
        }

        return 0;
}
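
/** signal the workers to stop and wait for their lcores to return */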
static int
scheduler_stop(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
        uint16_t i;

        mc_ctx->stop_signal = 1;

        for (i = 0; i < sched_ctx->nb_wc; i++)
                rte_eal_wait_lcore(sched_ctx->wc_pool[i]);

        return 0;
}
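
/** allocate the per queue pair private context */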
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
        struct mc_scheduler_qp_ctx *mc_qp_ctx;
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;

        mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
                        rte_socket_id());
        if (!mc_qp_ctx) {
                CS_LOG_ERR("failed to allocate memory for private queue pair");
                return -ENOMEM;
        }

        mc_qp_ctx->mc_private_ctx = mc_ctx;
        qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;

        return 0;
}
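
/**
 * Allocate the scheduler private context and one enqueue/dequeue ring pair
 * per worker core.
 */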
static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = NULL;
        uint16_t i;

        if (sched_ctx->private_ctx) {
                rte_free(sched_ctx->private_ctx);
                sched_ctx->private_ctx = NULL;
        }

        mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
                        rte_socket_id());
        if (!mc_ctx) {
                CS_LOG_ERR("failed to allocate memory");
                return -ENOMEM;
        }

        mc_ctx->num_workers = sched_ctx->nb_wc;
        for (i = 0; i < sched_ctx->nb_wc; i++) {
                char r_name[16];

                snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
                mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
                                rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
                if (!mc_ctx->sched_enq_ring[i]) {
                        CS_LOG_ERR("Cannot create ring for worker %u", i);
                        goto exit;
                }

                snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
                mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
                                rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
                if (!mc_ctx->sched_deq_ring[i]) {
                        CS_LOG_ERR("Cannot create ring for worker %u", i);
                        goto exit;
                }
        }

        sched_ctx->private_ctx = (void *)mc_ctx;

        return 0;

exit:
        for (i = 0; i < sched_ctx->nb_wc; i++) {
                rte_ring_free(mc_ctx->sched_enq_ring[i]);
                rte_ring_free(mc_ctx->sched_deq_ring[i]);
        }
        rte_free(mc_ctx);

        return -1;
}
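
/** scheduler operation callbacks for the multi-core mode */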
struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
        slave_attach,
        slave_detach,
        scheduler_start,
        scheduler_stop,
        scheduler_config_qp,
        scheduler_create_private_ctx,
        NULL,   /* option_set */
        NULL    /* option_get */
};
struct rte_cryptodev_scheduler mc_scheduler = {
                .name = "multicore-scheduler",
                .description = "scheduler which will run burst across multiple cpu cores",
                .mode = CDEV_SCHED_MODE_MULTICORE,
                .ops = &scheduler_mc_ops
};

struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;