dpdk.git: drivers/crypto/scheduler/rte_cryptodev_scheduler.c (commit 730504dab7af7ff16ce7c2fa3dbfefc980c617fa)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_string_fns.h>
#include <rte_reorder.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

/** Update the scheduler PMD's capabilities with the attached device's
 *  capabilities.
 *  For each device to be attached, the scheduler's capabilities should be
 *  the common capability set of all slaves.
 **/
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
                uint32_t nb_caps,
                const struct rte_cryptodev_capabilities *slave_caps)
{
        uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
        uint32_t i;

        while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
                nb_slave_caps++;

        if (nb_caps == 0) {
                rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
                return nb_slave_caps;
        }

        for (i = 0; i < sync_nb_caps; i++) {
                struct rte_cryptodev_capabilities *cap = &caps[i];
                uint32_t j;

                for (j = 0; j < nb_slave_caps; j++) {
                        const struct rte_cryptodev_capabilities *s_cap =
                                        &slave_caps[j];

                        if (s_cap->op != cap->op || s_cap->sym.xform_type !=
                                        cap->sym.xform_type)
                                continue;

                        if (s_cap->sym.xform_type ==
                                        RTE_CRYPTO_SYM_XFORM_AUTH) {
                                if (s_cap->sym.auth.algo !=
                                                cap->sym.auth.algo)
                                        continue;

                                cap->sym.auth.digest_size.min =
                                        s_cap->sym.auth.digest_size.min <
                                        cap->sym.auth.digest_size.min ?
                                        s_cap->sym.auth.digest_size.min :
                                        cap->sym.auth.digest_size.min;
                                cap->sym.auth.digest_size.max =
                                        s_cap->sym.auth.digest_size.max <
                                        cap->sym.auth.digest_size.max ?
                                        s_cap->sym.auth.digest_size.max :
                                        cap->sym.auth.digest_size.max;

                        }

                        if (s_cap->sym.xform_type ==
                                        RTE_CRYPTO_SYM_XFORM_CIPHER)
                                if (s_cap->sym.cipher.algo !=
                                                cap->sym.cipher.algo)
                                        continue;

                        /* a common cap was found in this slave; keep caps[i] */
                        break;
                }

                if (j < nb_slave_caps)
                        continue;

                /* remove an uncommon cap from the array */
                for (j = i; j < sync_nb_caps - 1; j++)
                        rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

                memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
                sync_nb_caps--;
        }

        return sync_nb_caps;
}

static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
        struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
        uint32_t nb_caps = 0, i;

        if (sched_ctx->capabilities) {
                rte_free(sched_ctx->capabilities);
                sched_ctx->capabilities = NULL;
        }

        for (i = 0; i < sched_ctx->nb_slaves; i++) {
                struct rte_cryptodev_info dev_info;

                rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

                nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
                if (nb_caps == 0)
                        return -1;
        }

        sched_ctx->capabilities = rte_zmalloc_socket(NULL,
                        sizeof(struct rte_cryptodev_capabilities) *
                        (nb_caps + 1), 0, SOCKET_ID_ANY);
        if (!sched_ctx->capabilities)
                return -ENOMEM;

        rte_memcpy(sched_ctx->capabilities, tmp_caps,
                        sizeof(struct rte_cryptodev_capabilities) * nb_caps);

        return 0;
}

static void
update_scheduler_feature_flag(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        uint32_t i;

        dev->feature_flags = 0;

        for (i = 0; i < sched_ctx->nb_slaves; i++) {
                struct rte_cryptodev_info dev_info;

                rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

                dev->feature_flags |= dev_info.feature_flags;
        }
}

static void
update_max_nb_qp(struct scheduler_ctx *sched_ctx)
{
        uint32_t i;
        uint32_t max_nb_qp;

        if (!sched_ctx->nb_slaves)
                return;

        max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;

        for (i = 0; i < sched_ctx->nb_slaves; i++) {
                struct rte_cryptodev_info dev_info;

                rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
                max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
                                dev_info.max_nb_queue_pairs : max_nb_qp;
        }

        sched_ctx->max_nb_queue_pairs = max_nb_qp;
}

/** Attach a device to the scheduler. */
int
rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;
        struct scheduler_slave *slave;
        struct rte_cryptodev_info dev_info;
        uint32_t i;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CR_SCHED_LOG(ERR, "Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;
        if (sched_ctx->nb_slaves >=
                        RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
                CR_SCHED_LOG(ERR, "Too many slaves attached");
                return -ENOMEM;
        }

        for (i = 0; i < sched_ctx->nb_slaves; i++)
                if (sched_ctx->slaves[i].dev_id == slave_id) {
                        CR_SCHED_LOG(ERR, "Slave already added");
                        return -ENOTSUP;
                }

        slave = &sched_ctx->slaves[sched_ctx->nb_slaves];

        rte_cryptodev_info_get(slave_id, &dev_info);

        slave->dev_id = slave_id;
        slave->driver_id = dev_info.driver_id;
        sched_ctx->nb_slaves++;

        if (update_scheduler_capability(sched_ctx) < 0) {
                slave->dev_id = 0;
                slave->driver_id = 0;
                sched_ctx->nb_slaves--;

                CR_SCHED_LOG(ERR, "capabilities update failed");
                return -ENOTSUP;
        }

        update_scheduler_feature_flag(dev);

        update_max_nb_qp(sched_ctx);

        return 0;
}
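
/*
 * Illustrative usage sketch, not part of the driver: attach a set of
 * already-created crypto devices to a scheduler device before it is
 * configured and started. The slave ids below are hypothetical; a real
 * application would obtain them from rte_cryptodev_get_dev_id() or from
 * vdev creation. The helper is marked __rte_unused because it exists
 * only for illustration.
 */
static __rte_unused int
example_attach_slaves(uint8_t scheduler_id)
{
        /* hypothetical ids of two previously created crypto PMDs */
        uint8_t slave_ids[] = { 1, 2 };
        uint32_t i;

        for (i = 0; i < sizeof(slave_ids) / sizeof(slave_ids[0]); i++) {
                int ret = rte_cryptodev_scheduler_slave_attach(scheduler_id,
                                slave_ids[i]);

                if (ret < 0)
                        return ret;
        }

        return 0;
}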

int
rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;
        uint32_t i, slave_pos;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CR_SCHED_LOG(ERR, "Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
                if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
                        break;
        if (slave_pos == sched_ctx->nb_slaves) {
                CR_SCHED_LOG(ERR, "Cannot find slave");
                return -ENOTSUP;
        }

        if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
                CR_SCHED_LOG(ERR, "Failed to detach slave");
                return -ENOTSUP;
        }

        for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
                memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
                                sizeof(struct scheduler_slave));
        }
        memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
                        sizeof(struct scheduler_slave));
        sched_ctx->nb_slaves--;

        if (update_scheduler_capability(sched_ctx) < 0) {
                CR_SCHED_LOG(ERR, "capabilities update failed");
                return -ENOTSUP;
        }

        update_scheduler_feature_flag(dev);

        update_max_nb_qp(sched_ctx);

        return 0;
}

int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
                enum rte_cryptodev_scheduler_mode mode)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CR_SCHED_LOG(ERR, "Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        if (mode == sched_ctx->mode)
                return 0;

        switch (mode) {
        case CDEV_SCHED_MODE_ROUNDROBIN:
                if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                                crypto_scheduler_roundrobin) < 0) {
                        CR_SCHED_LOG(ERR, "Failed to load scheduler");
                        return -1;
                }
                break;
        case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
                if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                                crypto_scheduler_pkt_size_based_distr) < 0) {
                        CR_SCHED_LOG(ERR, "Failed to load scheduler");
                        return -1;
                }
                break;
        case CDEV_SCHED_MODE_FAILOVER:
                if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                                crypto_scheduler_failover) < 0) {
                        CR_SCHED_LOG(ERR, "Failed to load scheduler");
                        return -1;
                }
                break;
        case CDEV_SCHED_MODE_MULTICORE:
                if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                                crypto_scheduler_multicore) < 0) {
                        CR_SCHED_LOG(ERR, "Failed to load scheduler");
                        return -1;
                }
                break;
        default:
                CR_SCHED_LOG(ERR, "Not yet supported");
                return -ENOTSUP;
        }

        return 0;
}
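
/*
 * Illustrative usage sketch, not part of the driver: select a scheduling
 * mode once the slaves are attached and before the device is started.
 * Round-robin is used here purely as an example; any supported value of
 * enum rte_cryptodev_scheduler_mode works the same way. Illustration
 * only, hence __rte_unused.
 */
static __rte_unused int
example_set_roundrobin_mode(uint8_t scheduler_id)
{
        if (rte_cryptodev_scheduler_mode_set(scheduler_id,
                        CDEV_SCHED_MODE_ROUNDROBIN) < 0)
                return -1;

        /* read the mode back to confirm it took effect */
        return rte_cryptodev_scheduler_mode_get(scheduler_id) ==
                        CDEV_SCHED_MODE_ROUNDROBIN ? 0 : -EINVAL;
}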

enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        sched_ctx = dev->data->dev_private;

        return sched_ctx->mode;
}

int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
                uint32_t enable_reorder)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CR_SCHED_LOG(ERR, "Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        sched_ctx->reordering_enabled = enable_reorder;

        return 0;
}

int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        sched_ctx = dev->data->dev_private;

        return (int)sched_ctx->reordering_enabled;
}
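
/*
 * Illustrative usage sketch, not part of the driver: enable crypto op
 * reordering so that ops are dequeued in their enqueue order even when
 * they were processed by different slaves, then read the setting back.
 * Illustration only, hence __rte_unused.
 */
static __rte_unused int
example_enable_reordering(uint8_t scheduler_id)
{
        int ret = rte_cryptodev_scheduler_ordering_set(scheduler_id, 1);

        if (ret < 0)
                return ret;

        return rte_cryptodev_scheduler_ordering_get(scheduler_id) == 1 ?
                        0 : -EINVAL;
}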

int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
                struct rte_cryptodev_scheduler *scheduler)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CR_SCHED_LOG(ERR, "Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
                CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
                                "%u bytes.", scheduler->name,
                                RTE_CRYPTODEV_NAME_MAX_LEN);
                return -EINVAL;
        }
        strlcpy(sched_ctx->name, scheduler->name, sizeof(sched_ctx->name));

        if (strlen(scheduler->description) >
                        RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
                CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
                                "%u bytes.", scheduler->description,
                                RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
                return -EINVAL;
        }
        strlcpy(sched_ctx->description, scheduler->description,
                sizeof(sched_ctx->description));

        /* load the scheduler instance's operation functions */
        sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
        sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
        sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
        sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
        sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
        sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
        sched_ctx->ops.option_set = scheduler->ops->option_set;
        sched_ctx->ops.option_get = scheduler->ops->option_get;

        if (sched_ctx->private_ctx) {
                rte_free(sched_ctx->private_ctx);
                sched_ctx->private_ctx = NULL;
        }

        if (sched_ctx->ops.create_private_ctx) {
                int ret = (*sched_ctx->ops.create_private_ctx)(dev);

                if (ret < 0) {
                        CR_SCHED_LOG(ERR, "Unable to create scheduler private "
                                        "context");
                        return ret;
                }
        }

        sched_ctx->mode = scheduler->mode;

        return 0;
}
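
/*
 * Illustrative usage sketch, not part of the driver: register a
 * user-defined scheduler. The ops table passed in is assumed to be a
 * caller-populated struct rte_cryptodev_scheduler_ops (at minimum
 * slave_attach/slave_detach and scheduler_start/scheduler_stop, which
 * install the custom enqueue/dequeue functions). The field names follow
 * the accesses made by rte_cryptodev_scheduler_load_user_scheduler()
 * above, and CDEV_SCHED_MODE_USERDEFINED marks the mode as user
 * supplied. Illustration only, hence __rte_unused.
 */
static __rte_unused int
example_load_custom_scheduler(uint8_t scheduler_id,
                struct rte_cryptodev_scheduler_ops *custom_ops)
{
        static struct rte_cryptodev_scheduler custom_scheduler = {
                .name = "custom-sched",
                .description = "example user-defined scheduler",
                .mode = CDEV_SCHED_MODE_USERDEFINED,
        };

        custom_scheduler.ops = custom_ops;

        return rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                        &custom_scheduler);
}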

int
rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;
        uint32_t nb_slaves = 0;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        sched_ctx = dev->data->dev_private;

        nb_slaves = sched_ctx->nb_slaves;

        if (slaves && nb_slaves) {
                uint32_t i;

                for (i = 0; i < nb_slaves; i++)
                        slaves[i] = sched_ctx->slaves[i].dev_id;
        }

        return (int)nb_slaves;
}

int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
                enum rte_cryptodev_schedule_option_type option_type,
                void *option)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
                        option_type >= CDEV_SCHED_OPTION_COUNT) {
                CR_SCHED_LOG(ERR, "Invalid option parameter");
                return -EINVAL;
        }

        if (!option) {
                CR_SCHED_LOG(ERR, "Invalid option parameter");
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                CR_SCHED_LOG(ERR, "Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);

        return (*sched_ctx->ops.option_set)(dev, option_type, option);
}

int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
                enum rte_cryptodev_schedule_option_type option_type,
                void *option)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        if (!option) {
                CR_SCHED_LOG(ERR, "Invalid option parameter");
                return -EINVAL;
        }

        if (dev->driver_id != cryptodev_scheduler_driver_id) {
                CR_SCHED_LOG(ERR, "Operation not supported");
                return -ENOTSUP;
        }

        sched_ctx = dev->data->dev_private;

        RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);

        return (*sched_ctx->ops.option_get)(dev, option_type, option);
}

RTE_LOG_REGISTER(scheduler_logtype_driver, pmd.crypto.scheduler, INFO);