drivers/crypto/scheduler/scheduler_multicore.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define MC_SCHED_ENQ_RING_NAME_PREFIX   "MCS_ENQR_"
#define MC_SCHED_DEQ_RING_NAME_PREFIX   "MCS_DEQR_"

#define MC_SCHED_BUFFER_SIZE 32

/** multi-core scheduler context */
struct mc_scheduler_ctx {
        uint32_t num_workers;             /**< Number of workers polling */
        uint32_t stop_signal;             /**< Set to ask worker lcores to exit */

        /** per-worker rings carrying ops from the frontend to the worker lcores */
        struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
        /** per-worker rings carrying processed ops back to the frontend */
        struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
};

/** multi-core scheduler queue pair context */
struct mc_scheduler_qp_ctx {
        struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
        uint32_t nb_slaves;

        uint32_t last_enq_worker_idx;     /**< worker ring to start the next enqueue from */
        uint32_t last_deq_worker_idx;     /**< worker ring to start the next dequeue from */

        struct mc_scheduler_ctx *mc_private_ctx;
};

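/**
 * Frontend enqueue: push the burst into the per-worker enqueue rings,
 * starting from the ring where the previous call stopped and spilling
 * into the next ring whenever one fills up. Worker lcores drain these
 * rings and submit the ops to their slave devices.
 */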
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct mc_scheduler_qp_ctx *mc_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
        uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
        uint16_t i, processed_ops = 0;

        if (unlikely(nb_ops == 0))
                return 0;

        for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
                struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
                uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
                        (void *)(&ops[processed_ops]), nb_ops, NULL);

                nb_ops -= nb_queue_ops;
                processed_ops += nb_queue_ops;

                if (++worker_idx == mc_ctx->num_workers)
                        worker_idx = 0;
        }
        mc_qp_ctx->last_enq_worker_idx = worker_idx;

        return processed_ops;
}

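/**
 * Same as schedule_enqueue(), but additionally records the enqueued ops in
 * the order ring so that schedule_dequeue_ordering() can return them to the
 * application in their original order.
 */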
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;
        uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
                        nb_ops);
        uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
                        nb_ops_to_enq);

        scheduler_order_insert(order_ring, ops, nb_ops_enqd);

        return nb_ops_enqd;
}

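/**
 * Frontend dequeue: pull processed ops from the per-worker dequeue rings,
 * rotating through the rings in the same round-robin fashion as the
 * enqueue side.
 */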
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct mc_scheduler_qp_ctx *mc_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
        uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
        uint16_t i, processed_ops = 0;

        for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
                struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
                uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
                        (void *)(&ops[processed_ops]), nb_ops, NULL);

                nb_ops -= nb_deq_ops;
                processed_ops += nb_deq_ops;
                if (++worker_idx == mc_ctx->num_workers)
                        worker_idx = 0;
        }

        mc_qp_ctx->last_deq_worker_idx = worker_idx;

        return processed_ops;
}

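/**
 * Ordered dequeue: drain ops from the order ring, stopping at the first op
 * that has not been processed yet, so the original enqueue order is kept.
 */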
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;

        return scheduler_order_drain(order_ring, ops, nb_ops);
}

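/* No per-slave state is kept by the multi-core scheduler, so attach and
 * detach are no-ops.
 */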
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

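/**
 * Main loop run on each configured worker lcore. A worker owns one slave
 * and one pair of rings: it pulls ops from its enqueue ring, swaps in the
 * slave's own session, submits the ops to the slave device, and pushes
 * completed ops onto its dequeue ring for the frontend to collect.
 */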
static int
mc_scheduler_worker(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
        struct rte_ring *enq_ring;
        struct rte_ring *deq_ring;
        uint32_t core_id = rte_lcore_id();
        int i, worker_idx = -1;
        struct scheduler_slave *slave;
        struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
        struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
        struct scheduler_session *sess0, *sess1, *sess2, *sess3;
        uint16_t processed_ops;
        uint16_t left_op = 0;
        uint16_t left_op_idx = 0;
        uint16_t inflight_ops = 0;

        /* Map this lcore to its worker index within the configured pool. */
        for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
                if (sched_ctx->wc_pool[i] == core_id) {
                        worker_idx = i;
                        break;
                }
        }
        if (worker_idx == -1) {
                CS_LOG_ERR("worker on core %u: cannot find worker index!", core_id);
                return -1;
        }

        slave = &sched_ctx->slaves[worker_idx];
        enq_ring = mc_ctx->sched_enq_ring[worker_idx];
        deq_ring = mc_ctx->sched_deq_ring[worker_idx];

        while (!mc_ctx->stop_signal) {
                if (left_op) {
                        /* Retry ops the slave could not accept last time. */
                        processed_ops =
                                rte_cryptodev_enqueue_burst(slave->dev_id,
                                                slave->qp_id,
                                                &enq_ops[left_op_idx], left_op);

                        left_op -= processed_ops;
                        left_op_idx += processed_ops;
                        inflight_ops += processed_ops;
                } else {
                        uint16_t nb_deq_ops = rte_ring_dequeue_burst(enq_ring,
                                (void *)enq_ops, MC_SCHED_BUFFER_SIZE, NULL);
                        if (nb_deq_ops) {
                                uint16_t i;

                                /* Replace the scheduler session attached to
                                 * each op with this slave's own session,
                                 * prefetching four sessions ahead.
                                 */
                                for (i = 0; i < nb_deq_ops && i < 4; i++)
                                        rte_prefetch0(enq_ops[i]->sym->session);

                                for (i = 0; (i < (nb_deq_ops - 8))
                                                        && (nb_deq_ops > 8); i += 4) {
                                        sess0 = (struct scheduler_session *)
                                                enq_ops[i]->sym->session->_private;
                                        sess1 = (struct scheduler_session *)
                                                enq_ops[i+1]->sym->session->_private;
                                        sess2 = (struct scheduler_session *)
                                                enq_ops[i+2]->sym->session->_private;
                                        sess3 = (struct scheduler_session *)
                                                enq_ops[i+3]->sym->session->_private;

                                        enq_ops[i]->sym->session =
                                                        sess0->sessions[worker_idx];
                                        enq_ops[i + 1]->sym->session =
                                                        sess1->sessions[worker_idx];
                                        enq_ops[i + 2]->sym->session =
                                                        sess2->sessions[worker_idx];
                                        enq_ops[i + 3]->sym->session =
                                                        sess3->sessions[worker_idx];

                                        rte_prefetch0(enq_ops[i + 4]->sym->session);
                                        rte_prefetch0(enq_ops[i + 5]->sym->session);
                                        rte_prefetch0(enq_ops[i + 6]->sym->session);
                                        rte_prefetch0(enq_ops[i + 7]->sym->session);
                                }

                                for (; i < nb_deq_ops; i++) {
                                        sess0 = (struct scheduler_session *)
                                                enq_ops[i]->sym->session->_private;
                                        enq_ops[i]->sym->session =
                                                        sess0->sessions[worker_idx];
                                }

                                processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
                                                slave->qp_id, enq_ops, nb_deq_ops);

                                /* Keep ops the slave did not accept and retry
                                 * them on the next iteration.
                                 */
                                if (unlikely(processed_ops < nb_deq_ops)) {
                                        left_op = nb_deq_ops - processed_ops;
                                        left_op_idx = processed_ops;
                                }

                                inflight_ops += processed_ops;
                        }
                }

                if (inflight_ops > 0) {
                        processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
                                        slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
                        if (processed_ops) {
                                uint16_t nb_enq_ops = rte_ring_enqueue_burst(deq_ring,
                                        (void *)deq_ops, processed_ops, NULL);
                                inflight_ops -= nb_enq_ops;
                        }
                }

                rte_pause();
        }

        return 0;
}

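/**
 * Start the scheduler: launch one worker loop on each configured worker
 * lcore, select the enqueue/dequeue callbacks according to whether crypto
 * op reordering is enabled, and (re)initialise the per-queue-pair slave
 * information.
 */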
static int
scheduler_start(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
        uint16_t i;

        mc_ctx->stop_signal = 0;

        for (i = 0; i < sched_ctx->nb_wc; i++)
                rte_eal_remote_launch(
                        (lcore_function_t *)mc_scheduler_worker, dev,
                                        sched_ctx->wc_pool[i]);

        if (sched_ctx->reordering_enabled) {
                dev->enqueue_burst = &schedule_enqueue_ordering;
                dev->dequeue_burst = &schedule_dequeue_ordering;
        } else {
                dev->enqueue_burst = &schedule_enqueue;
                dev->dequeue_burst = &schedule_dequeue;
        }

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
                struct mc_scheduler_qp_ctx *mc_qp_ctx =
                                qp_ctx->private_qp_ctx;
                uint32_t j;

                memset(mc_qp_ctx->slaves, 0,
                                RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
                                sizeof(struct scheduler_slave));
                for (j = 0; j < sched_ctx->nb_slaves; j++) {
                        mc_qp_ctx->slaves[j].dev_id =
                                        sched_ctx->slaves[j].dev_id;
                        mc_qp_ctx->slaves[j].qp_id = i;
                }

                mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

                mc_qp_ctx->last_enq_worker_idx = 0;
                mc_qp_ctx->last_deq_worker_idx = 0;
        }

        return 0;
}

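/**
 * Stop the scheduler: signal the worker lcores to exit their loops and
 * wait for each of them to finish.
 */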
static int
scheduler_stop(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;

        mc_ctx->stop_signal = 1;

        for (uint16_t i = 0; i < sched_ctx->nb_wc; i++)
                rte_eal_wait_lcore(sched_ctx->wc_pool[i]);

        return 0;
}

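/**
 * Allocate the per-queue-pair private context and link it to the shared
 * multi-core scheduler context.
 */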
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
        struct mc_scheduler_qp_ctx *mc_qp_ctx;
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;

        mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
                        rte_socket_id());
        if (!mc_qp_ctx) {
                CS_LOG_ERR("failed to allocate memory for private queue pair");
                return -ENOMEM;
        }

        mc_qp_ctx->mc_private_ctx = mc_ctx;
        qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;

        return 0;
}

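/**
 * Allocate the shared multi-core scheduler context and create one enqueue
 * ring and one dequeue ring per configured worker lcore.
 */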
static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct mc_scheduler_ctx *mc_ctx;
        uint16_t i;

        if (sched_ctx->private_ctx) {
                rte_free(sched_ctx->private_ctx);
                sched_ctx->private_ctx = NULL;
        }

        mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
                        rte_socket_id());
        if (!mc_ctx) {
                CS_LOG_ERR("failed to allocate memory");
                return -ENOMEM;
        }

        mc_ctx->num_workers = sched_ctx->nb_wc;
        for (i = 0; i < sched_ctx->nb_wc; i++) {
                char r_name[16];

                snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
                mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
                                        rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
                if (!mc_ctx->sched_enq_ring[i]) {
                        CS_LOG_ERR("Cannot create ring for worker %u", i);
                        goto exit;
                }
                snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
                mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
                                        rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
                if (!mc_ctx->sched_deq_ring[i]) {
                        CS_LOG_ERR("Cannot create ring for worker %u", i);
                        goto exit;
                }
        }

        sched_ctx->private_ctx = (void *)mc_ctx;

        return 0;

exit:
        /* Free any rings created so far and the context itself on failure. */
        for (i = 0; i < sched_ctx->nb_wc; i++) {
                rte_ring_free(mc_ctx->sched_enq_ring[i]);
                rte_ring_free(mc_ctx->sched_deq_ring[i]);
        }
        rte_free(mc_ctx);

        return -1;
}

struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
        slave_attach,
        slave_detach,
        scheduler_start,
        scheduler_stop,
        scheduler_config_qp,
        scheduler_create_private_ctx,
        NULL,   /* option_set */
        NULL    /* option_get */
};

struct rte_cryptodev_scheduler mc_scheduler = {
                .name = "multicore-scheduler",
                .description = "scheduler which will run burst across multiple cpu cores",
                .mode = CDEV_SCHED_MODE_MULTICORE,
                .ops = &scheduler_mc_ops
};

struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;