drivers/crypto/scheduler/rte_cryptodev_scheduler.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_string_fns.h>
#include <rte_reorder.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

int scheduler_logtype_driver;

/** Update the scheduler PMD's capabilities with an attached device's
 *  capabilities.
 *  For each device to be attached, the scheduler's capabilities are
 *  reduced to the common capability set of all slaves.
 **/
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
		uint32_t nb_caps,
		const struct rte_cryptodev_capabilities *slave_caps)
{
	uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
	uint32_t i;

	while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
		nb_slave_caps++;

	if (nb_caps == 0) {
		rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
		return nb_slave_caps;
	}

	for (i = 0; i < sync_nb_caps; i++) {
		struct rte_cryptodev_capabilities *cap = &caps[i];
		uint32_t j;

		for (j = 0; j < nb_slave_caps; j++) {
			const struct rte_cryptodev_capabilities *s_cap =
					&slave_caps[j];

			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
					cap->sym.xform_type)
				continue;

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH) {
				if (s_cap->sym.auth.algo !=
						cap->sym.auth.algo)
					continue;

				cap->sym.auth.digest_size.min =
					s_cap->sym.auth.digest_size.min <
					cap->sym.auth.digest_size.min ?
					s_cap->sym.auth.digest_size.min :
					cap->sym.auth.digest_size.min;
				cap->sym.auth.digest_size.max =
					s_cap->sym.auth.digest_size.max <
					cap->sym.auth.digest_size.max ?
					s_cap->sym.auth.digest_size.max :
					cap->sym.auth.digest_size.max;

			}

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_CIPHER)
				if (s_cap->sym.cipher.algo !=
						cap->sym.cipher.algo)
					continue;
			/* common cap found, stop searching */
			break;
		}

		if (j < nb_slave_caps)
			continue;

		/* remove an uncommon cap from the array */
		for (j = i; j < sync_nb_caps - 1; j++)
			rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
		sync_nb_caps--;
	}

	return sync_nb_caps;
}

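/** Rebuild the scheduler's capability array as the common capability set
 *  of all attached slaves.
 **/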
static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
	uint32_t nb_caps = 0, i;

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
		if (nb_caps == 0)
			return -1;
	}

	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
			sizeof(struct rte_cryptodev_capabilities) *
			(nb_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->capabilities)
		return -ENOMEM;

	rte_memcpy(sched_ctx->capabilities, tmp_caps,
			sizeof(struct rte_cryptodev_capabilities) * nb_caps);

	return 0;
}

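/** Recompute the scheduler's feature flags as the union of the feature
 *  flags of all attached slaves.
 **/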
static void
update_scheduler_feature_flag(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	dev->feature_flags = 0;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

		dev->feature_flags |= dev_info.feature_flags;
	}
}

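/** Update the scheduler's maximum number of queue pairs to the smallest
 *  value reported by any attached slave.
 **/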
static void
update_max_nb_qp(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;
	uint32_t max_nb_qp;

	if (!sched_ctx->nb_slaves)
		return;

	max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
				dev_info.max_nb_queue_pairs : max_nb_qp;
	}

	sched_ctx->max_nb_queue_pairs = max_nb_qp;
}

/** Attach a device to the scheduler. */
int
rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	struct scheduler_slave *slave;
	struct rte_cryptodev_info dev_info;
	uint32_t i;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;
	if (sched_ctx->nb_slaves >=
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
		CR_SCHED_LOG(ERR, "Too many slaves attached");
		return -ENOMEM;
	}

	for (i = 0; i < sched_ctx->nb_slaves; i++)
		if (sched_ctx->slaves[i].dev_id == slave_id) {
			CR_SCHED_LOG(ERR, "Slave already added");
			return -ENOTSUP;
		}

	slave = &sched_ctx->slaves[sched_ctx->nb_slaves];

	rte_cryptodev_info_get(slave_id, &dev_info);

	slave->dev_id = slave_id;
	slave->driver_id = dev_info.driver_id;
	sched_ctx->nb_slaves++;

	if (update_scheduler_capability(sched_ctx) < 0) {
		slave->dev_id = 0;
		slave->driver_id = 0;
		sched_ctx->nb_slaves--;

		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

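/** Detach a device from the scheduler. */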
int
rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t i, slave_pos;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
		if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
			break;
	if (slave_pos == sched_ctx->nb_slaves) {
		CR_SCHED_LOG(ERR, "Cannot find slave");
		return -ENOTSUP;
	}

	if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to detach slave");
		return -ENOTSUP;
	}

	for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
		memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
				sizeof(struct scheduler_slave));
	}
	memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
			sizeof(struct scheduler_slave));
	sched_ctx->nb_slaves--;

	if (update_scheduler_capability(sched_ctx) < 0) {
		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

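/** Set the scheduling mode by loading the corresponding built-in
 *  scheduler.
 **/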
int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
		enum rte_cryptodev_scheduler_mode mode)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (mode == sched_ctx->mode)
		return 0;

	switch (mode) {
	case CDEV_SCHED_MODE_ROUNDROBIN:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_roundrobin) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_pkt_size_based_distr) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_FAILOVER:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_failover) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_MULTICORE:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_multicore) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	default:
		CR_SCHED_LOG(ERR, "Not yet supported");
		return -ENOTSUP;
	}

	return 0;
}

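/** Get the current scheduling mode of the scheduler. */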
enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return sched_ctx->mode;
}

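/** Enable or disable crypto operation reordering. */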
int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
		uint32_t enable_reorder)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	sched_ctx->reordering_enabled = enable_reorder;

	return 0;
}

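/** Get the current crypto operation reordering setting. */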
int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return (int)sched_ctx->reordering_enabled;
}

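/** Load a scheduler instance into the scheduler PMD: copy its name and
 *  description, hook up its operations and create its private context.
 **/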
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
		struct rte_cryptodev_scheduler *scheduler)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
				"%u bytes.", scheduler->name,
				RTE_CRYPTODEV_NAME_MAX_LEN);
		return -EINVAL;
	}
	strlcpy(sched_ctx->name, scheduler->name, sizeof(sched_ctx->name));

	if (strlen(scheduler->description) >
			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
				"%u bytes.", scheduler->description,
				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
		return -EINVAL;
	}
	strlcpy(sched_ctx->description, scheduler->description,
		sizeof(sched_ctx->description));

	/* load the scheduler instance's operation functions */
	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
	sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
	sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
	sched_ctx->ops.option_set = scheduler->ops->option_set;
	sched_ctx->ops.option_get = scheduler->ops->option_get;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->ops.create_private_ctx) {
		int ret = (*sched_ctx->ops.create_private_ctx)(dev);

		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Unable to create scheduler private "
					"context");
			return ret;
		}
	}

	sched_ctx->mode = scheduler->mode;

	return 0;
}

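/** Get the device IDs of the attached slaves.
 *  Returns the number of attached slaves.
 **/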
int
rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t nb_slaves = 0;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	nb_slaves = sched_ctx->nb_slaves;

	if (slaves && nb_slaves) {
		uint32_t i;

		for (i = 0; i < nb_slaves; i++)
			slaves[i] = sched_ctx->slaves[i].dev_id;
	}

	return (int)nb_slaves;
}

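/** Set an option of the loaded scheduler via its option_set operation. */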
int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	/* validate the scheduler device before dereferencing it */
	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
			option_type >= CDEV_SCHED_OPTION_COUNT) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (!option) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);

	return (*sched_ctx->ops.option_set)(dev, option_type, option);
}

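/** Get an option of the loaded scheduler via its option_get operation. */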
int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (!option) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);

	return (*sched_ctx->ops.option_get)(dev, option_type, option);
}

RTE_INIT(scheduler_init_log)
{
	scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler");
}