crypto/scheduler: support mode specific option
drivers/crypto/scheduler/scheduler_pkt_size_distr.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define DEF_PKT_SIZE_THRESHOLD                  (0xffffff80)
#define SLAVE_IDX_SWITCH_MASK                   (0x01)
#define PRIMARY_SLAVE_IDX                       0
#define SECONDARY_SLAVE_IDX                     1
#define NB_PKT_SIZE_SLAVES                      2

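/*
 * The threshold is stored in mask form: a power-of-two threshold N is kept
 * as ~(N - 1), so (job_len & mask) is nonzero exactly when job_len >= N.
 * DEF_PKT_SIZE_THRESHOLD is ~(128 - 1), i.e. a default threshold of 128
 * bytes.
 */
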
/** pkt size based scheduler context */
struct psd_scheduler_ctx {
        uint32_t threshold;
};

/** pkt size based scheduler queue pair context */
struct psd_scheduler_qp_ctx {
        struct scheduler_slave primary_slave;
        struct scheduler_slave secondary_slave;
        uint32_t threshold;
        uint32_t max_nb_objs;
        uint8_t deq_idx;
} __rte_cache_aligned;

/** per-slave scheduling state for one enqueue pass */
struct psd_schedule_op {
        uint8_t slave_idx;
        uint16_t pos;
};

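/*
 * Enqueue a burst of crypto ops, distributing them by job length: ops whose
 * length reaches the threshold go to the primary slave, smaller ops to the
 * secondary slave. The main loop is unrolled four ways and prefetches the
 * ops and their sessions four positions ahead. Scheduling stops early once
 * either slave queue would overflow.
 */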
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct psd_scheduler_qp_ctx *qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
        struct scheduler_session *sess;
        uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
                        qp_ctx->primary_slave.nb_inflight_cops,
                        qp_ctx->secondary_slave.nb_inflight_cops
        };
        struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
                {PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
        };
        struct psd_schedule_op *p_enq_op;
        uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
        uint32_t job_len;

        if (unlikely(nb_ops == 0))
                return 0;

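        /* warm up the cache with the first ops and their sessions */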
        for (i = 0; i < nb_ops && i < 4; i++) {
                rte_prefetch0(ops[i]->sym);
                rte_prefetch0(ops[i]->sym->session);
        }

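        /*
         * 4-way unrolled classification loop: prefetch the next four ops
         * while assigning the current four to a slave.
         */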
        for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
                rte_prefetch0(ops[i + 4]->sym);
                rte_prefetch0(ops[i + 4]->sym->session);
                rte_prefetch0(ops[i + 5]->sym);
                rte_prefetch0(ops[i + 5]->sym->session);
                rte_prefetch0(ops[i + 6]->sym);
                rte_prefetch0(ops[i + 6]->sym->session);
                rte_prefetch0(ops[i + 7]->sym);
                rte_prefetch0(ops[i + 7]->sym->session);

                sess = (struct scheduler_session *)
                                ops[i]->sym->session->_private;
                /* job_len starts as the cipher data length; if that is
                 * zero, the auth data length is used instead
                 */
                job_len = ops[i]->sym->cipher.data.length;
                job_len += (ops[i]->sym->cipher.data.length == 0) *
                                ops[i]->sym->auth.data.length;
                /* decide the target slave based on the job length */
                p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];

                /* stop scheduling ops before the slave queue fills up, so
                 * the enqueue burst below cannot fail
                 */
                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
                        i = nb_ops;
                        break;
                }

                sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
                ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
                p_enq_op->pos++;

                sess = (struct scheduler_session *)
                                ops[i+1]->sym->session->_private;
                job_len = ops[i+1]->sym->cipher.data.length;
                job_len += (ops[i+1]->sym->cipher.data.length == 0) *
                                ops[i+1]->sym->auth.data.length;
                p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];

                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
                        i = nb_ops;
                        break;
                }

                sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
                ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
                p_enq_op->pos++;

                sess = (struct scheduler_session *)
                                ops[i+2]->sym->session->_private;
                job_len = ops[i+2]->sym->cipher.data.length;
                job_len += (ops[i+2]->sym->cipher.data.length == 0) *
                                ops[i+2]->sym->auth.data.length;
                p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];

                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
                        i = nb_ops;
                        break;
                }

                sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
                ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
                p_enq_op->pos++;

                sess = (struct scheduler_session *)
                                ops[i+3]->sym->session->_private;

                job_len = ops[i+3]->sym->cipher.data.length;
                job_len += (ops[i+3]->sym->cipher.data.length == 0) *
                                ops[i+3]->sym->auth.data.length;
                p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];

                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
                        i = nb_ops;
                        break;
                }

                sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
                ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
                p_enq_op->pos++;
        }

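        /* schedule the remaining ops one at a time */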
        for (; i < nb_ops; i++) {
                sess = (struct scheduler_session *)
                                ops[i]->sym->session->_private;

                job_len = ops[i]->sym->cipher.data.length;
                job_len += (ops[i]->sym->cipher.data.length == 0) *
                                ops[i]->sym->auth.data.length;
                p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];

                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
                        i = nb_ops;
                        break;
                }

                sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
                ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
                p_enq_op->pos++;
        }

        processed_ops_pri = rte_cryptodev_enqueue_burst(
                        qp_ctx->primary_slave.dev_id,
                        qp_ctx->primary_slave.qp_id,
                        sched_ops[PRIMARY_SLAVE_IDX],
                        enq_ops[PRIMARY_SLAVE_IDX].pos);
        /* enqueue shall not fail as the slave queue is monitored */
        RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);

        qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;

        processed_ops_sec = rte_cryptodev_enqueue_burst(
                        qp_ctx->secondary_slave.dev_id,
                        qp_ctx->secondary_slave.qp_id,
                        sched_ops[SECONDARY_SLAVE_IDX],
                        enq_ops[SECONDARY_SLAVE_IDX].pos);
        RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);

        qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;

        return processed_ops_pri + processed_ops_sec;
}

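/*
 * Same as schedule_enqueue(), but caps the burst at what the order ring can
 * still hold and records the ops in their arrival order, so that the
 * dequeue side can return results in the original sequence.
 */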
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;
        uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
                        nb_ops);
        uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
                        nb_ops_to_enq);

        scheduler_order_insert(order_ring, ops, nb_ops_enqd);

        return nb_ops_enqd;
}

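/*
 * Dequeue from both slaves, starting with the one selected by deq_idx. The
 * starting slave alternates between calls so that neither queue is starved.
 */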
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct psd_scheduler_qp_ctx *qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
                        &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
        struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
        uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;

        if (slave->nb_inflight_cops) {
                nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
                        slave->qp_id, ops, nb_ops);
                slave->nb_inflight_cops -= nb_deq_ops_pri;
        }

        qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;

        if (nb_deq_ops_pri == nb_ops)
                return nb_deq_ops_pri;

        slave = slaves[qp_ctx->deq_idx];

        if (slave->nb_inflight_cops) {
                nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
                                slave->qp_id, &ops[nb_deq_ops_pri],
                                nb_ops - nb_deq_ops_pri);
                slave->nb_inflight_cops -= nb_deq_ops_sec;

                if (!slave->nb_inflight_cops)
                        qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
                                        SLAVE_IDX_SWITCH_MASK;
        }

        return nb_deq_ops_pri + nb_deq_ops_sec;
}

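/*
 * Same as schedule_dequeue(), but returns only the ops that are next in the
 * original enqueue order, drained from the order ring.
 */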
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;

        schedule_dequeue(qp, ops, nb_ops);

        return scheduler_order_drain(order_ring, ops, nb_ops);
}

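/* this mode keeps no per-slave state, so attach and detach are no-ops */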
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

static int
scheduler_start(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
        uint16_t i;

        /* the packet size based scheduler needs at least two slaves */
        if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
                CS_LOG_ERR("not enough slaves to start");
                return -1;
        }

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
                struct psd_scheduler_qp_ctx *ps_qp_ctx =
                                qp_ctx->private_qp_ctx;

                ps_qp_ctx->primary_slave.dev_id =
                                sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
                ps_qp_ctx->primary_slave.qp_id = i;
                ps_qp_ctx->primary_slave.nb_inflight_cops = 0;

                ps_qp_ctx->secondary_slave.dev_id =
                                sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
                ps_qp_ctx->secondary_slave.qp_id = i;
                ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;

                ps_qp_ctx->threshold = psd_ctx->threshold;

                ps_qp_ctx->max_nb_objs = sched_ctx->qp_conf.nb_descriptors;
        }

        if (sched_ctx->reordering_enabled) {
                dev->enqueue_burst = &schedule_enqueue_ordering;
                dev->dequeue_burst = &schedule_dequeue_ordering;
        } else {
                dev->enqueue_burst = &schedule_enqueue;
                dev->dequeue_burst = &schedule_dequeue;
        }

        return 0;
}

static int
scheduler_stop(struct rte_cryptodev *dev)
{
        uint16_t i;

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
                struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;

                if (ps_qp_ctx->primary_slave.nb_inflight_cops +
                                ps_qp_ctx->secondary_slave.nb_inflight_cops) {
                        CS_LOG_ERR("Some crypto ops left in slave queue");
                        return -1;
                }
        }

        return 0;
}

static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
        struct psd_scheduler_qp_ctx *ps_qp_ctx;

        ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
                        rte_socket_id());
        if (!ps_qp_ctx) {
                CS_LOG_ERR("failed to allocate memory for private queue pair");
                return -ENOMEM;
        }

        qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;

        return 0;
}

static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        struct psd_scheduler_ctx *psd_ctx;

        /* clear the pointer after freeing, so a failed allocation below
         * cannot leave a dangling private_ctx behind
         */
        if (sched_ctx->private_ctx) {
                rte_free(sched_ctx->private_ctx);
                sched_ctx->private_ctx = NULL;
        }

        psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
                        rte_socket_id());
        if (!psd_ctx) {
                CS_LOG_ERR("failed to allocate memory");
                return -ENOMEM;
        }

        psd_ctx->threshold = DEF_PKT_SIZE_THRESHOLD;

        sched_ctx->private_ctx = (void *)psd_ctx;

        return 0;
}

static int
scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
                void *option)
{
        struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
                        dev->data->dev_private)->private_ctx;
        uint32_t threshold;

        if ((enum rte_cryptodev_schedule_option_type)option_type !=
                        CDEV_SCHED_OPTION_THRESHOLD) {
                CS_LOG_ERR("Option not supported");
                return -EINVAL;
        }

        threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
                        option)->threshold;
        if (!rte_is_power_of_2(threshold)) {
                CS_LOG_ERR("Threshold is not a power of 2");
                return -EINVAL;
        }

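        /* store the threshold in mask form so the datapath classification
         * in schedule_enqueue() is a single AND
         */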
        psd_ctx->threshold = ~(threshold - 1);

        return 0;
}

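/*
 * Usage sketch (illustrative only, not compiled here): an application is
 * expected to set a custom threshold through the scheduler's public API in
 * rte_cryptodev_scheduler.h. `scheduler_id` is assumed to be the id of an
 * already configured scheduler device running in packet size distribution
 * mode; the threshold must be a power of two, see scheduler_option_set()
 * above.
 *
 *      struct rte_cryptodev_scheduler_threshold_option opt = {
 *              .threshold = 64
 *      };
 *
 *      if (rte_cryptodev_scheduler_option_set(scheduler_id,
 *                      CDEV_SCHED_OPTION_THRESHOLD, (void *)&opt) < 0)
 *              rte_exit(EXIT_FAILURE, "failed to set threshold\n");
 */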
static int
scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
                void *option)
{
        struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
                        dev->data->dev_private)->private_ctx;
        struct rte_cryptodev_scheduler_threshold_option *threshold_option;

        if ((enum rte_cryptodev_schedule_option_type)option_type !=
                        CDEV_SCHED_OPTION_THRESHOLD) {
                CS_LOG_ERR("Option not supported");
                return -EINVAL;
        }

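        /* convert the stored mask back to the plain threshold value */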
        threshold_option = option;
        threshold_option->threshold = (~psd_ctx->threshold) + 1;

        return 0;
}

struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
        slave_attach,
        slave_detach,
        scheduler_start,
        scheduler_stop,
        scheduler_config_qp,
        scheduler_create_private_ctx,
        scheduler_option_set,
        scheduler_option_get
};

struct rte_cryptodev_scheduler psd_scheduler = {
        .name = "packet-size-based-scheduler",
        .description = "scheduler which distributes crypto op "
                        "bursts based on the packet size",
        .mode = CDEV_SCHED_MODE_PKT_SIZE_DISTR,
        .ops = &scheduler_ps_ops
};

struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;