/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"

/** Attach the slaves predefined by the scheduler's EAL options */
static int
scheduler_attach_init_slave(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;

	for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_slave_names[i];
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!slave_dev) {
			CS_LOG_ERR("Failed to locate slave dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_slave_attach(
				scheduler_id, slave_dev->data->dev_id);
		if (status < 0) {
			CS_LOG_ERR("Failed to attach slave cryptodev %u",
					slave_dev->data->dev_id);
			return status;
		}

		CS_LOG_INFO("Scheduler %s attached slave %s\n",
				dev->data->name,
				sched_ctx->init_slave_names[i]);

		rte_free(sched_ctx->init_slave_names[i]);
		sched_ctx->nb_init_slaves -= 1;
	}

	return 0;
}
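
/*
 * For reference, the init slave names consumed above typically come from
 * EAL vdev arguments. An illustrative command line (the slave PMD and
 * instance names are examples only and depend on the PMDs compiled in):
 *
 *	./app --vdev "crypto_aesni_mb_pmd,name=aesni_mb_1" \
 *		--vdev "crypto_scheduler,slave=aesni_mb_1,mode=round-robin"
 */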

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_slave() is called from several ops entry
	 * points, but only the first call performs a meaningful attach.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	/* propagate the configuration to every attached slave */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_configure(slave_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}

/** Create, resize or free the reorder ring of a queue pair */
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CS_LOG_ERR("failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}
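
/*
 * Sizing sketch: with, say, two slaves and a hypothetical
 * PER_SLAVE_BUFF_SIZE of 1024 (the real constant is defined in
 * scheduler_pmd_private.h), rte_align32pow2(2 * 1024) = 2048, i.e. one
 * buffer's worth of in-flight ops per slave, rounded up to the power of
 * two that rte_ring sizes require.
 */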

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_slave() is called from several ops entry
	 * points, but only the first call performs a meaningful attach.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}
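
/*
 * Note that scheduler_pmd_stop() undoes scheduler_pmd_start() in reverse
 * order: start attaches the slaves, starts the scheduler, then starts
 * each slave; stop first stops every slave, then stops the scheduler and
 * finally detaches the slaves, so no op can be scheduled to a slave that
 * is already stopped.
 */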

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx)
		rte_free(sched_ctx->private_ctx);

	if (sched_ctx->capabilities)
		rte_free(sched_ctx->capabilities);

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* the scheduler's statistics are the sums over its slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_slave() is called from several ops entry
	 * points, but only the first call performs a meaningful attach.
	 */
	scheduler_attach_init_slave(dev);

	/* advertise the smallest max_nb_sessions among all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = slave_info.sym.max_nb_sessions <
				max_nb_sessions ?
				slave_info.sym.max_nb_sessions :
				max_nb_sessions;
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}
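
/*
 * Example of the minimum computed above: if two slaves report 2048 and
 * 4096 for sym.max_nb_sessions, the scheduler advertises 2048, since
 * every scheduler session needs a counterpart session on each slave.
 */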

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	/* set up the same queue pair on every slave */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_id = sched_ctx->slaves[i].dev_id;

		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The ring holds nb_descriptors entries, so the actual number of
	 * available objects is nb_descriptors - 1.
	 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_slave() is called from several ops entry
	 * points, but only the first call performs a meaningful attach.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0) {
		CS_LOG_ERR("Failed to attach slave");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the scheduler session structure */
static unsigned
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}

/** Create (create == 1) or free (create == 0) a session on every slave */
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
		struct rte_crypto_sym_xform *xform,
		struct scheduler_session *sess,
		uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create */
			sess->sessions[i] = rte_cryptodev_sym_session_free(
					slave->dev_id, sess->sessions[i]);
		} else {
			if (!create)
				continue;
			/* create */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				/* unwind the sessions created so far */
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}
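
/*
 * Note the fan-out pattern above: one scheduler session owns one slave
 * session per attached slave, all built from the same xform chain, so a
 * crypto op can be handed to any slave without renegotiating session
 * state; on any slave failure the recursive call with create == 0 frees
 * whatever was already created.
 */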

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
		void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	/* free the slave sessions first, then scrub the session memory */
	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}

/** Configure a session on the scheduler and all of its slaves */
static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to configure sym session");
		return NULL;
	}

	return sess;
}

struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure = scheduler_pmd_config,
		.dev_start = scheduler_pmd_start,
		.dev_stop = scheduler_pmd_stop,
		.dev_close = scheduler_pmd_close,

		.stats_get = scheduler_pmd_stats_get,
		.stats_reset = scheduler_pmd_stats_reset,

		.dev_infos_get = scheduler_pmd_info_get,

		.queue_pair_setup = scheduler_pmd_qp_setup,
		.queue_pair_release = scheduler_pmd_qp_release,
		.queue_pair_start = scheduler_pmd_qp_start,
		.queue_pair_stop = scheduler_pmd_qp_stop,
		.queue_pair_count = scheduler_pmd_qp_count,

		.session_get_size = scheduler_pmd_session_get_size,
		.session_configure = scheduler_pmd_session_configure,
		.session_clear = scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
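
/*
 * Usage sketch (application side, not part of this driver): once the
 * scheduler vdev exists it is driven like any other cryptodev, and slaves
 * can also be attached at runtime through the scheduler API. The device
 * name and variables below are illustrative only:
 *
 *	int sid = rte_cryptodev_get_dev_id("crypto_scheduler");
 *
 *	rte_cryptodev_scheduler_slave_attach(sid, slave_dev_id);
 *	rte_cryptodev_scheduler_mode_set(sid, CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_configure(sid, &conf);
 *	rte_cryptodev_start(sid);
 */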