drivers/crypto/scheduler/rte_cryptodev_scheduler.c (dpdk.git, commit ed574cc181a1dd97ad237bed044e041f7d14ce61)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 #include <rte_reorder.h>
5 #include <rte_cryptodev.h>
6 #include <rte_cryptodev_pmd.h>
7 #include <rte_malloc.h>
8
9 #include "rte_cryptodev_scheduler.h"
10 #include "scheduler_pmd_private.h"
11
12 /** Update the scheduler PMD's capabilities with an attached device's
13  *  capabilities: for each device attached, the scheduler advertises the
14  *  common capability set of all slaves (e.g. an auth capability keeps the
15  *  smaller of the two digest-size minimums and maximums).
16  */
17 static uint32_t
18 sync_caps(struct rte_cryptodev_capabilities *caps,
19                 uint32_t nb_caps,
20                 const struct rte_cryptodev_capabilities *slave_caps)
21 {
22         uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
23         uint32_t i;
24
25         while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
26                 nb_slave_caps++;
27
28         if (nb_caps == 0) {
29                 rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
30                 return nb_slave_caps;
31         }
32
33         for (i = 0; i < sync_nb_caps; i++) {
34                 struct rte_cryptodev_capabilities *cap = &caps[i];
35                 uint32_t j;
36
37                 for (j = 0; j < nb_slave_caps; j++) {
38                         const struct rte_cryptodev_capabilities *s_cap =
39                                         &slave_caps[j];
40
41                         if (s_cap->op != cap->op || s_cap->sym.xform_type !=
42                                         cap->sym.xform_type)
43                                 continue;
44
45                         if (s_cap->sym.xform_type ==
46                                         RTE_CRYPTO_SYM_XFORM_AUTH) {
47                                 if (s_cap->sym.auth.algo !=
48                                                 cap->sym.auth.algo)
49                                         continue;
50
51                                 cap->sym.auth.digest_size.min =
52                                         s_cap->sym.auth.digest_size.min <
53                                         cap->sym.auth.digest_size.min ?
54                                         s_cap->sym.auth.digest_size.min :
55                                         cap->sym.auth.digest_size.min;
56                                 cap->sym.auth.digest_size.max =
57                                         s_cap->sym.auth.digest_size.max <
58                                         cap->sym.auth.digest_size.max ?
59                                         s_cap->sym.auth.digest_size.max :
60                                         cap->sym.auth.digest_size.max;
61
62                         }
63
64                         if (s_cap->sym.xform_type ==
65                                         RTE_CRYPTO_SYM_XFORM_CIPHER)
66                                 if (s_cap->sym.cipher.algo !=
67                                                 cap->sym.cipher.algo)
68                                         continue;
69
70                         /* a common cap is found, keep this cap */
71                         break;
72                 }
73
74                 if (j < nb_slave_caps)
75                         continue;
76
77                 /* remove an uncommon cap from the array */
78                 for (j = i; j < sync_nb_caps - 1; j++)
79                         rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));
80
81                 memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
82                 sync_nb_caps--;
83         }
84
85         return sync_nb_caps;
86 }
87
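/** Rebuild the scheduler's capability array from the common capability set
 *  of all currently attached slaves.
 */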
88 static int
89 update_scheduler_capability(struct scheduler_ctx *sched_ctx)
90 {
91         struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
92         uint32_t nb_caps = 0, i;
93
94         if (sched_ctx->capabilities) {
95                 rte_free(sched_ctx->capabilities);
96                 sched_ctx->capabilities = NULL;
97         }
98
99         for (i = 0; i < sched_ctx->nb_slaves; i++) {
100                 struct rte_cryptodev_info dev_info;
101
102                 rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
103
104                 nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
105                 if (nb_caps == 0)
106                         return -1;
107         }
108
109         sched_ctx->capabilities = rte_zmalloc_socket(NULL,
110                         sizeof(struct rte_cryptodev_capabilities) *
111                         (nb_caps + 1), 0, SOCKET_ID_ANY);
112         if (!sched_ctx->capabilities)
113                 return -ENOMEM;
114
115         rte_memcpy(sched_ctx->capabilities, tmp_caps,
116                         sizeof(struct rte_cryptodev_capabilities) * nb_caps);
117
118         return 0;
119 }
120
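/** Recompute the scheduler's feature flags as the union (bitwise OR) of all
 *  attached slaves' feature flags.
 */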
121 static void
122 update_scheduler_feature_flag(struct rte_cryptodev *dev)
123 {
124         struct scheduler_ctx *sched_ctx = dev->data->dev_private;
125         uint32_t i;
126
127         dev->feature_flags = 0;
128
129         for (i = 0; i < sched_ctx->nb_slaves; i++) {
130                 struct rte_cryptodev_info dev_info;
131
132                 rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
133
134                 dev->feature_flags |= dev_info.feature_flags;
135         }
136 }
137
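/** Update the scheduler's maximum number of queue pairs to the smallest
 *  value supported by any attached slave.
 */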
138 static void
139 update_max_nb_qp(struct scheduler_ctx *sched_ctx)
140 {
141         uint32_t i;
142         uint32_t max_nb_qp;
143
144         if (!sched_ctx->nb_slaves)
145                 return;
146
147         max_nb_qp = UINT32_MAX; /* nb_slaves != 0 is guaranteed above */
148
149         for (i = 0; i < sched_ctx->nb_slaves; i++) {
150                 struct rte_cryptodev_info dev_info;
151
152                 rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
153                 max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
154                                 dev_info.max_nb_queue_pairs : max_nb_qp;
155         }
156
157         sched_ctx->max_nb_queue_pairs = max_nb_qp;
158 }
159
160 /** Attach a device to the scheduler. */
161 int
162 rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
163 {
164         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
165         struct scheduler_ctx *sched_ctx;
166         struct scheduler_slave *slave;
167         struct rte_cryptodev_info dev_info;
168         uint32_t i;
169
170         if (!dev) {
171                 CS_LOG_ERR("Operation not supported");
172                 return -ENOTSUP;
173         }
174
175         if (dev->driver_id != cryptodev_driver_id) {
176                 CS_LOG_ERR("Operation not supported");
177                 return -ENOTSUP;
178         }
179
180         if (dev->data->dev_started) {
181                 CS_LOG_ERR("Illegal operation");
182                 return -EBUSY;
183         }
184
185         sched_ctx = dev->data->dev_private;
186         if (sched_ctx->nb_slaves >=
187                         RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
188                 CS_LOG_ERR("Too many slaves attached");
189                 return -ENOMEM;
190         }
191
192         for (i = 0; i < sched_ctx->nb_slaves; i++)
193                 if (sched_ctx->slaves[i].dev_id == slave_id) {
194                         CS_LOG_ERR("Slave already added");
195                         return -ENOTSUP;
196                 }
197
198         slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
199
200         rte_cryptodev_info_get(slave_id, &dev_info);
201
202         slave->dev_id = slave_id;
203         slave->driver_id = dev_info.driver_id;
204         sched_ctx->nb_slaves++;
205
206         if (update_scheduler_capability(sched_ctx) < 0) {
207                 slave->dev_id = 0;
208                 slave->driver_id = 0;
209                 sched_ctx->nb_slaves--;
210
211                 CS_LOG_ERR("Capabilities update failed");
212                 return -ENOTSUP;
213         }
214
215         update_scheduler_feature_flag(dev);
216
217         update_max_nb_qp(sched_ctx);
218
219         return 0;
220 }
221
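/** Detach a device from the scheduler. */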
222 int
223 rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
224 {
225         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
226         struct scheduler_ctx *sched_ctx;
227         uint32_t i, slave_pos;
228
229         if (!dev) {
230                 CS_LOG_ERR("Operation not supported");
231                 return -ENOTSUP;
232         }
233
234         if (dev->driver_id != cryptodev_driver_id) {
235                 CS_LOG_ERR("Operation not supported");
236                 return -ENOTSUP;
237         }
238
239         if (dev->data->dev_started) {
240                 CS_LOG_ERR("Illegal operation");
241                 return -EBUSY;
242         }
243
244         sched_ctx = dev->data->dev_private;
245
246         for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
247                 if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
248                         break;
249         if (slave_pos == sched_ctx->nb_slaves) {
250                 CS_LOG_ERR("Cannot find slave");
251                 return -ENOTSUP;
252         }
253
254         if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
255                 CS_LOG_ERR("Failed to detach slave");
256                 return -ENOTSUP;
257         }
258
259         for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
260                 memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
261                                 sizeof(struct scheduler_slave));
262         }
263         memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
264                         sizeof(struct scheduler_slave));
265         sched_ctx->nb_slaves--;
266
267         if (update_scheduler_capability(sched_ctx) < 0) {
268                 CS_LOG_ERR("Capabilities update failed");
269                 return -ENOTSUP;
270         }
271
272         update_scheduler_feature_flag(dev);
273
274         update_max_nb_qp(sched_ctx);
275
276         return 0;
277 }
278
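/** Set the scheduling mode by loading the corresponding built-in scheduler. */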
279 int
280 rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
281                 enum rte_cryptodev_scheduler_mode mode)
282 {
283         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
284         struct scheduler_ctx *sched_ctx;
285
286         if (!dev) {
287                 CS_LOG_ERR("Operation not supported");
288                 return -ENOTSUP;
289         }
290
291         if (dev->driver_id != cryptodev_driver_id) {
292                 CS_LOG_ERR("Operation not supported");
293                 return -ENOTSUP;
294         }
295
296         if (dev->data->dev_started) {
297                 CS_LOG_ERR("Illegal operation");
298                 return -EBUSY;
299         }
300
301         sched_ctx = dev->data->dev_private;
302
303         if (mode == sched_ctx->mode)
304                 return 0;
305
306         switch (mode) {
307         case CDEV_SCHED_MODE_ROUNDROBIN:
308                 if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
309                                 roundrobin_scheduler) < 0) {
310                         CS_LOG_ERR("Failed to load scheduler");
311                         return -1;
312                 }
313                 break;
314         case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
315                 if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
316                                 pkt_size_based_distr_scheduler) < 0) {
317                         CS_LOG_ERR("Failed to load scheduler");
318                         return -1;
319                 }
320                 break;
321         case CDEV_SCHED_MODE_FAILOVER:
322                 if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
323                                 failover_scheduler) < 0) {
324                         CS_LOG_ERR("Failed to load scheduler");
325                         return -1;
326                 }
327                 break;
328         case CDEV_SCHED_MODE_MULTICORE:
329                 if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
330                                 multicore_scheduler) < 0) {
331                         CS_LOG_ERR("Failed to load scheduler");
332                         return -1;
333                 }
334                 break;
335         default:
336                 CS_LOG_ERR("Not yet supported");
337                 return -ENOTSUP;
338         }
339
340         return 0;
341 }
342
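/** Get the current scheduling mode. */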
343 enum rte_cryptodev_scheduler_mode
344 rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
345 {
346         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
347         struct scheduler_ctx *sched_ctx;
348
349         if (!dev) {
350                 CS_LOG_ERR("Operation not supported");
351                 return -ENOTSUP;
352         }
353
354         if (dev->driver_id != cryptodev_driver_id) {
355                 CS_LOG_ERR("Operation not supported");
356                 return -ENOTSUP;
357         }
358
359         sched_ctx = dev->data->dev_private;
360
361         return sched_ctx->mode;
362 }
363
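/** Enable or disable crypto operation reordering. */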
364 int
365 rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
366                 uint32_t enable_reorder)
367 {
368         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
369         struct scheduler_ctx *sched_ctx;
370
371         if (!dev) {
372                 CS_LOG_ERR("Operation not supported");
373                 return -ENOTSUP;
374         }
375
376         if (dev->driver_id != cryptodev_driver_id) {
377                 CS_LOG_ERR("Operation not supported");
378                 return -ENOTSUP;
379         }
380
381         if (dev->data->dev_started) {
382                 CS_LOG_ERR("Illegal operation");
383                 return -EBUSY;
384         }
385
386         sched_ctx = dev->data->dev_private;
387
388         sched_ctx->reordering_enabled = enable_reorder;
389
390         return 0;
391 }
392
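/** Get the current crypto operation reordering setting. */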
393 int
394 rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
395 {
396         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
397         struct scheduler_ctx *sched_ctx;
398
399         if (!dev) {
400                 CS_LOG_ERR("Operation not supported");
401                 return -ENOTSUP;
402         }
403
404         if (dev->driver_id != cryptodev_driver_id) {
405                 CS_LOG_ERR("Operation not supported");
406                 return -ENOTSUP;
407         }
408
409         sched_ctx = dev->data->dev_private;
410
411         return (int)sched_ctx->reordering_enabled;
412 }
413
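/** Load a scheduler into the scheduler PMD: copy its name and description,
 *  take over its operation function pointers and (re)create its private
 *  context.
 */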
414 int
415 rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
416                 struct rte_cryptodev_scheduler *scheduler)
417 {
418         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
419         struct scheduler_ctx *sched_ctx;
420
421         if (!dev) {
422                 CS_LOG_ERR("Operation not supported");
423                 return -ENOTSUP;
424         }
425
426         if (dev->driver_id != cryptodev_driver_id) {
427                 CS_LOG_ERR("Operation not supported");
428                 return -ENOTSUP;
429         }
430
431         if (dev->data->dev_started) {
432                 CS_LOG_ERR("Illegal operation");
433                 return -EBUSY;
434         }
435
436         sched_ctx = dev->data->dev_private;
437
438         if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
439                 CS_LOG_ERR("Invalid name %s, should be less than "
440                                 "%u bytes.", scheduler->name,
441                                 RTE_CRYPTODEV_NAME_MAX_LEN);
442                 return -EINVAL;
443         }
444         snprintf(sched_ctx->name, sizeof(sched_ctx->name), "%s",
445                         scheduler->name);
446
447         if (strlen(scheduler->description) >
448                         RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
449                 CS_LOG_ERR("Invalid description %s, should be less than "
450                                 "%u bytes.", scheduler->description,
451                                 RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
452                 return -EINVAL;
453         }
454         snprintf(sched_ctx->description, sizeof(sched_ctx->description), "%s",
455                         scheduler->description);
456
457         /* load the scheduler instance's operation functions */
458         sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
459         sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
460         sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
461         sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
462         sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
463         sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
464         sched_ctx->ops.option_set = scheduler->ops->option_set;
465         sched_ctx->ops.option_get = scheduler->ops->option_get;
466
467         if (sched_ctx->private_ctx) {
468                 rte_free(sched_ctx->private_ctx);
469                 sched_ctx->private_ctx = NULL;
470         }
471
472         if (sched_ctx->ops.create_private_ctx) {
473                 int ret = (*sched_ctx->ops.create_private_ctx)(dev);
474
475                 if (ret < 0) {
476                         CS_LOG_ERR("Unable to create scheduler private "
477                                         "context");
478                         return ret;
479                 }
480         }
481
482         sched_ctx->mode = scheduler->mode;
483
484         return 0;
485 }
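/*
 * A minimal sketch of how a user-defined scheduler could be registered with
 * rte_cryptodev_scheduler_load_user_scheduler(). The callback names
 * (my_slave_attach, my_start, ...) are placeholders for functions matching
 * the prototypes in rte_cryptodev_scheduler_operations.h; option_set and
 * option_get may be left NULL, since the wrappers in this file check for
 * them before calling.
 *
 *	static struct rte_cryptodev_scheduler_ops my_sched_ops = {
 *		.slave_attach = my_slave_attach,
 *		.slave_detach = my_slave_detach,
 *		.scheduler_start = my_start,
 *		.scheduler_stop = my_stop,
 *		.config_queue_pair = my_config_qp,
 *		.create_private_ctx = my_create_private_ctx,
 *		.option_set = NULL,
 *		.option_get = NULL,
 *	};
 *
 *	static struct rte_cryptodev_scheduler my_scheduler = {
 *		.name = "my-scheduler",
 *		.description = "example user-defined scheduler",
 *		.mode = CDEV_SCHED_MODE_USERDEFINED,
 *		.ops = &my_sched_ops,
 *	};
 *
 *	ret = rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
 *			&my_scheduler);
 */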
486
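/** Copy the attached slaves' device IDs into 'slaves' (if non-NULL) and
 *  return the number of attached slaves.
 */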
487 int
488 rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
489 {
490         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
491         struct scheduler_ctx *sched_ctx;
492         uint32_t nb_slaves = 0;
493
494         if (!dev) {
495                 CS_LOG_ERR("Operation not supported");
496                 return -ENOTSUP;
497         }
498
499         if (dev->driver_id != cryptodev_driver_id) {
500                 CS_LOG_ERR("Operation not supported");
501                 return -ENOTSUP;
502         }
503
504         sched_ctx = dev->data->dev_private;
505
506         nb_slaves = sched_ctx->nb_slaves;
507
508         if (slaves && nb_slaves) {
509                 uint32_t i;
510
511                 for (i = 0; i < nb_slaves; i++)
512                         slaves[i] = sched_ctx->slaves[i].dev_id;
513         }
514
515         return (int)nb_slaves;
516 }
517
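/** Set a mode-specific option, forwarded to the loaded scheduler's
 *  option_set operation.
 */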
518 int
519 rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
520                 enum rte_cryptodev_schedule_option_type option_type,
521                 void *option)
522 {
523         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
524         struct scheduler_ctx *sched_ctx;
525
526         if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
527                         option_type >= CDEV_SCHED_OPTION_COUNT) {
528                 CS_LOG_ERR("Invalid option parameter");
529                 return -EINVAL;
530         }
531
532         if (!option) {
533                 CS_LOG_ERR("Invalid option parameter");
534                 return -EINVAL;
535         }
536
            /* validate the scheduler device before dereferencing it */
            if (!dev) {
                    CS_LOG_ERR("Operation not supported");
                    return -ENOTSUP;
            }

            if (dev->driver_id != cryptodev_driver_id) {
                    CS_LOG_ERR("Operation not supported");
                    return -ENOTSUP;
            }

537         if (dev->data->dev_started) {
538                 CS_LOG_ERR("Illegal operation");
539                 return -EBUSY;
540         }
541
542         sched_ctx = dev->data->dev_private;
543
544         RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);
545
546         return (*sched_ctx->ops.option_set)(dev, option_type, option);
547 }
548
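/** Get a mode-specific option, forwarded to the loaded scheduler's
 *  option_get operation.
 */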
549 int
550 rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
551                 enum rte_cryptodev_schedule_option_type option_type,
552                 void *option)
553 {
554         struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
555         struct scheduler_ctx *sched_ctx;
556
557         if (!dev) {
558                 CS_LOG_ERR("Operation not supported");
559                 return -ENOTSUP;
560         }
561
562         if (!option) {
563                 CS_LOG_ERR("Invalid option parameter");
564                 return -EINVAL;
565         }
566
567         if (dev->driver_id != cryptodev_driver_id) {
568                 CS_LOG_ERR("Operation not supported");
569                 return -ENOTSUP;
570         }
571
572         sched_ctx = dev->data->dev_private;
573
574         RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);
575
576         return (*sched_ctx->ops.option_get)(dev, option_type, option);
577 }
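/*
 * A minimal usage sketch of this API from an application's point of view
 * (illustrative only, not part of the driver). It assumes a scheduler vdev
 * and two slave cryptodevs were already created, e.g. via EAL arguments such
 * as --vdev "crypto_scheduler"; the device names and the round-robin mode
 * below are assumptions, and error checking is omitted for brevity. All of
 * these calls must be made before the scheduler device is started.
 *
 *	int scheduler_id = rte_cryptodev_get_dev_id("crypto_scheduler");
 *	int slave0_id = rte_cryptodev_get_dev_id("crypto_aesni_mb0");
 *	int slave1_id = rte_cryptodev_get_dev_id("crypto_aesni_mb1");
 *
 *	rte_cryptodev_scheduler_slave_attach(scheduler_id, slave0_id);
 *	rte_cryptodev_scheduler_slave_attach(scheduler_id, slave1_id);
 *
 *	rte_cryptodev_scheduler_mode_set(scheduler_id,
 *			CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_scheduler_ordering_set(scheduler_id, 1);
 */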