4 * Copyright(c) 2017 Intel Corporation. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <rte_common.h>
33 #include <rte_hexdump.h>
34 #include <rte_cryptodev.h>
35 #include <rte_cryptodev_pmd.h>
36 #include <rte_cryptodev_vdev.h>
38 #include <rte_malloc.h>
39 #include <rte_cpuflags.h>
40 #include <rte_reorder.h>
42 #include "rte_cryptodev_scheduler.h"
43 #include "scheduler_pmd_private.h"
/* Driver ID shared by all scheduler devices; assigned by
 * RTE_PMD_REGISTER_CRYPTO_DRIVER at the bottom of this file. */
45 uint8_t cryptodev_driver_id;
/* Init parameters collected from the vdev devargs string.
 * NOTE(review): interior lines of this struct (e.g. the nb_slaves
 * counter referenced later in this file) and the closing brace are
 * not visible in this chunk. */
47 struct scheduler_init_params {
48 struct rte_crypto_vdev_init_params def_p;	/* common vdev params (name, socket, qp/session limits) */
50 enum rte_cryptodev_scheduler_mode mode;		/* scheduling mode selected via "mode=" devarg */
51 uint32_t enable_ordering;			/* non-zero enables crypto-op reordering */
52 char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
53 [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];		/* names of slave cryptodevs to attach */
/* Devargs key strings accepted by this vdev (e.g.
 * "crypto_scheduler,slave=...,mode=...,ordering=..."). */
56 #define RTE_CRYPTODEV_VDEV_NAME ("name")
57 #define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
58 #define RTE_CRYPTODEV_VDEV_MODE ("mode")
59 #define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
60 #define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
61 #define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
62 #define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
/* Whitelist of keys passed to rte_kvargs_parse(); any other key in
 * the devargs string is rejected.  NOTE(review): closing brace of
 * this array is not visible in this chunk. */
64 const char *scheduler_valid_params[] = {
65 RTE_CRYPTODEV_VDEV_NAME,
66 RTE_CRYPTODEV_VDEV_SLAVE,
67 RTE_CRYPTODEV_VDEV_MODE,
68 RTE_CRYPTODEV_VDEV_ORDERING,
69 RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
70 RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
71 RTE_CRYPTODEV_VDEV_SOCKET_ID
/* Maps a devarg string onto an integer value; used by both the mode
 * and ordering tables below.  NOTE(review): the struct members
 * (presumably a name string and an integer val) are not visible in
 * this chunk -- confirm against the full file. */
74 struct scheduler_parse_map {
/* Accepted "mode=" values and the scheduler modes they select.
 * NOTE(review): closing brace not visible in this chunk. */
79 const struct scheduler_parse_map scheduler_mode_map[] = {
80 {RTE_STR(SCHEDULER_MODE_NAME_ROUND_ROBIN),
81 CDEV_SCHED_MODE_ROUNDROBIN},
82 {RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
83 CDEV_SCHED_MODE_PKT_SIZE_DISTR},
84 {RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
85 CDEV_SCHED_MODE_FAILOVER}
/* Accepted "ordering=" values (enable/disable).  NOTE(review): the
 * array entries themselves are not visible in this chunk. */
88 const struct scheduler_parse_map scheduler_ordering_map[] = {
/**
 * Create and initialize a scheduler cryptodev virtual device.
 *
 * Allocates the cryptodev with a scheduler_ctx private area, applies
 * the parsed scheduling mode and reordering flag, records the slave
 * names to attach later, and allocates an empty capabilities array.
 *
 * NOTE(review): several interior lines of this function (return
 * statements, some error paths, the closing braces) are missing from
 * this chunk; comments below describe only the visible code.
 */
94 cryptodev_scheduler_create(const char *name,
95 struct rte_vdev_device *vdev,
96 struct scheduler_init_params *init_params)
98 struct rte_cryptodev *dev;
99 struct scheduler_ctx *sched_ctx;
/* Fall back to a generated default name when none was supplied
 * via the "name=" devarg. */
103 if (init_params->def_p.name[0] == '\0')
104 snprintf(init_params->def_p.name,
105 sizeof(init_params->def_p.name),
/* Allocate the cryptodev, reserving dev_private space for the
 * scheduler context on the requested NUMA socket. */
108 dev = rte_cryptodev_vdev_pmd_init(init_params->def_p.name,
109 sizeof(struct scheduler_ctx),
110 init_params->def_p.socket_id,
113 CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
118 dev->driver_id = cryptodev_driver_id;
119 dev->dev_ops = rte_crypto_scheduler_pmd_ops;
121 sched_ctx = dev->data->dev_private;
122 sched_ctx->max_nb_queue_pairs =
123 init_params->def_p.max_nb_queue_pairs;
/* Apply the scheduling mode only when a concrete (non-default,
 * non-userdefined) mode was parsed from the devargs. */
125 if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
126 init_params->mode < CDEV_SCHED_MODE_COUNT) {
127 ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
/* Mode-set failure: release the just-created device. */
130 rte_cryptodev_pmd_release_device(dev);
/* Log the human-readable name of the active scheduling mode. */
134 for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
135 if (scheduler_mode_map[i].val != sched_ctx->mode)
138 RTE_LOG(INFO, PMD, " Scheduling mode = %s\n",
139 scheduler_mode_map[i].name);
144 sched_ctx->reordering_enabled = init_params->enable_ordering;
/* Log whether crypto-op reordering is enabled, using the same
 * table the "ordering=" devarg was parsed against. */
146 for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
147 if (scheduler_ordering_map[i].val !=
148 sched_ctx->reordering_enabled)
151 RTE_LOG(INFO, PMD, " Packet ordering = %s\n",
152 scheduler_ordering_map[i].name);
/* Copy each requested slave name into the context; the actual
 * attach happens later.  NOTE(review): the allocation call for
 * init_slave_names[] is split across missing lines here --
 * presumably rte_zmalloc_socket(); confirm against full file. */
157 for (i = 0; i < init_params->nb_slaves; i++) {
158 sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =
161 RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,
164 if (!sched_ctx->init_slave_names[
165 sched_ctx->nb_init_slaves]) {
166 CS_LOG_ERR("driver %s: Insufficient memory",
/* Buffer is zero-filled on allocation, so the size-1 copy below
 * stays NUL-terminated. */
171 strncpy(sched_ctx->init_slave_names[
172 sched_ctx->nb_init_slaves],
173 init_params->slave_names[i],
174 RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
176 sched_ctx->nb_init_slaves++;
180 * Initialize capabilities structure as an empty structure,
181 * in case device information is requested when no slaves are attached
183 sched_ctx->capabilities = rte_zmalloc_socket(NULL,
184 sizeof(struct rte_cryptodev_capabilities),
187 if (!sched_ctx->capabilities) {
188 RTE_LOG(ERR, PMD, "Not enough memory for capability "
/**
 * Remove a scheduler vdev: detach every attached slave, then close
 * the device.  NOTE(review): the NULL-check / early-return lines and
 * the closing brace of this function are not visible in this chunk.
 */
197 cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
200 struct rte_cryptodev *dev;
201 struct scheduler_ctx *sched_ctx;
206 name = rte_vdev_device_name(vdev);
207 dev = rte_cryptodev_pmd_get_named_dev(name);
211 sched_ctx = dev->data->dev_private;
/* Detach all slaves before tearing the scheduler device down. */
213 if (sched_ctx->nb_slaves) {
216 for (i = 0; i < sched_ctx->nb_slaves; i++)
217 rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
218 sched_ctx->slaves[i].dev_id);
221 RTE_LOG(INFO, PMD, "Closing Crypto Scheduler device %s on numa "
222 "socket %u\n", name, rte_socket_id());
227 /** Parse integer from integer argument */
/* kvargs callback: converts `value` into the int pointed to by
 * extra_args.  NOTE(review): the conversion call (presumably atoi)
 * and the negative-value check guarding this error log are on lines
 * not visible in this chunk. */
229 parse_integer_arg(const char *key __rte_unused,
230 const char *value, void *extra_args)
232 int *i = (int *) extra_args;
236 CS_LOG_ERR("Argument has to be positive.\n");
/** kvargs callback: copy the user-supplied device name into the
 * default vdev params, rejecting names that would not fit.  The
 * length check makes the strncpy below safe (no truncation, always
 * NUL-terminated). */
245 parse_name_arg(const char *key __rte_unused,
246 const char *value, void *extra_args)
248 struct rte_crypto_vdev_init_params *params = extra_args;
250 if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
251 CS_LOG_ERR("Invalid name %s, should be less than "
252 "%u bytes.\n", value,
253 RTE_CRYPTODEV_NAME_MAX_LEN - 1);
257 strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
/** kvargs callback: record one slave cryptodev name (the "slave="
 * devarg may repeat); fails once the slave table is full. */
264 parse_slave_arg(const char *key __rte_unused,
265 const char *value, void *extra_args)
267 struct scheduler_init_params *param = extra_args;
269 if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
270 CS_LOG_ERR("Too many slaves.\n")
/* slave_names[] is zero-initialized at declaration, so copying at
 * most size-1 bytes keeps the entry NUL-terminated. */
274 strncpy(param->slave_names[param->nb_slaves++], value,
275 RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
/** kvargs callback: map the "mode=" string onto an
 * rte_cryptodev_scheduler_mode via scheduler_mode_map[]. */
281 parse_mode_arg(const char *key __rte_unused,
282 const char *value, void *extra_args)
284 struct scheduler_init_params *param = extra_args;
287 for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
288 if (strcmp(value, scheduler_mode_map[i].name) == 0) {
289 param->mode = (enum rte_cryptodev_scheduler_mode)
290 scheduler_mode_map[i].val;
/* Loop index reaching the table size means no entry matched. */
295 if (i == RTE_DIM(scheduler_mode_map)) {
296 CS_LOG_ERR("Unrecognized input.\n");
/** kvargs callback: map the "ordering=" string onto the
 * enable_ordering flag via scheduler_ordering_map[]. */
304 parse_ordering_arg(const char *key __rte_unused,
305 const char *value, void *extra_args)
307 struct scheduler_init_params *param = extra_args;
310 for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
311 if (strcmp(value, scheduler_ordering_map[i].name) == 0) {
312 param->enable_ordering =
313 scheduler_ordering_map[i].val;
/* No table entry matched the supplied value. */
318 if (i == RTE_DIM(scheduler_ordering_map)) {
319 CS_LOG_ERR("Unrecognized input.\n");
/**
 * Parse the vdev devargs string into *params: each recognized key is
 * dispatched through rte_kvargs_process() to the matching parse_*_arg
 * callback above; the kvargs list is freed at the end.
 *
 * NOTE(review): the error-exit lines between the process calls, and
 * the callback arguments on some calls, are not visible here.  Also,
 * the "¶ms" token on three lines below is mojibake -- it is almost
 * certainly a corrupted "&params" and must be restored when this
 * file is repaired (a byte-preserving doc pass cannot fix it).
 */
327 scheduler_parse_init_params(struct scheduler_init_params *params,
328 const char *input_args)
330 struct rte_kvargs *kvlist = NULL;
337 kvlist = rte_kvargs_parse(input_args,
338 scheduler_valid_params);
342 ret = rte_kvargs_process(kvlist,
343 RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
345 ¶ms->def_p.max_nb_queue_pairs);
349 ret = rte_kvargs_process(kvlist,
350 RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
352 ¶ms->def_p.max_nb_sessions);
356 ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
358 ¶ms->def_p.socket_id);
362 ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
368 ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,
369 &parse_slave_arg, params);
373 ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE,
374 &parse_mode_arg, params);
378 ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
379 &parse_ordering_arg, params);
385 rte_kvargs_free(kvlist);
/**
 * Vdev probe entry point: fill default init parameters, overlay the
 * user's devargs, log the effective configuration, then create the
 * device.  NOTE(review): some initializer lines and the final
 * arguments of the create call are not visible in this chunk.
 */
390 cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
392 struct scheduler_init_params init_params = {
394 RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
395 RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
400 .mode = CDEV_SCHED_MODE_NOT_SET,
401 .enable_ordering = 0,
402 .slave_names = { {0} }
406 name = rte_vdev_device_name(vdev);
/* Devargs override the defaults set above. */
410 scheduler_parse_init_params(&init_params,
411 rte_vdev_device_args(vdev));
413 RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
415 init_params.def_p.socket_id);
416 RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
417 init_params.def_p.max_nb_queue_pairs);
418 RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
419 init_params.def_p.max_nb_sessions);
420 if (init_params.def_p.name[0] != '\0')
421 RTE_LOG(INFO, PMD, " User defined name = %s\n",
422 init_params.def_p.name);
424 return cryptodev_scheduler_create(name,
/* Vdev driver glue: probe/remove entry points for this PMD.
 * NOTE(review): closing brace of the struct, and the tail of the
 * PARAM_STRING literal, are not visible in this chunk. */
429 static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
430 .probe = cryptodev_scheduler_probe,
431 .remove = cryptodev_scheduler_remove
/* Register the vdev driver, advertise its devargs keys, and obtain
 * the shared crypto driver ID used in cryptodev_scheduler_create(). */
434 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
435 cryptodev_scheduler_pmd_drv);
436 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
437 "max_nb_queue_pairs=<int> "
438 "max_nb_sessions=<int> "
441 RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_scheduler_pmd_drv,
442 cryptodev_driver_id);