/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
6 #include <rte_common.h>
7 #include <rte_malloc.h>
9 #include <rte_cryptodev.h>
10 #include <rte_cryptodev_pmd.h>
11 #include <rte_reorder.h>
13 #include "scheduler_pmd_private.h"
15 /** attaching the slaves predefined by scheduler's EAL options */
17 scheduler_attach_init_slave(struct rte_cryptodev *dev)
19 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
20 uint8_t scheduler_id = dev->data->dev_id;
23 for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
24 const char *dev_name = sched_ctx->init_slave_names[i];
25 struct rte_cryptodev *slave_dev =
26 rte_cryptodev_pmd_get_named_dev(dev_name);
30 CS_LOG_ERR("Failed to locate slave dev %s",
35 status = rte_cryptodev_scheduler_slave_attach(
36 scheduler_id, slave_dev->data->dev_id);
39 CS_LOG_ERR("Failed to attach slave cryptodev %u",
40 slave_dev->data->dev_id);
44 CS_LOG_INFO("Scheduler %s attached slave %s\n",
46 sched_ctx->init_slave_names[i]);
48 rte_free(sched_ctx->init_slave_names[i]);
50 sched_ctx->nb_init_slaves -= 1;
55 /** Configure device */
57 scheduler_pmd_config(struct rte_cryptodev *dev,
58 struct rte_cryptodev_config *config)
60 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
64 /* although scheduler_attach_init_slave presents multiple times,
65 * there will be only 1 meaningful execution.
67 ret = scheduler_attach_init_slave(dev);
71 for (i = 0; i < sched_ctx->nb_slaves; i++) {
72 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
74 ret = rte_cryptodev_configure(slave_dev_id, config);
83 update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
85 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
86 struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
88 if (sched_ctx->reordering_enabled) {
89 char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
90 uint32_t buff_size = rte_align32pow2(
91 sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
93 if (qp_ctx->order_ring) {
94 rte_ring_free(qp_ctx->order_ring);
95 qp_ctx->order_ring = NULL;
101 if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
102 "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
103 dev->data->dev_id, qp_id) < 0) {
104 CS_LOG_ERR("failed to create unique reorder buffer "
109 qp_ctx->order_ring = rte_ring_create(order_ring_name,
110 buff_size, rte_socket_id(),
111 RING_F_SP_ENQ | RING_F_SC_DEQ);
112 if (!qp_ctx->order_ring) {
113 CS_LOG_ERR("failed to create order ring");
117 if (qp_ctx->order_ring) {
118 rte_ring_free(qp_ctx->order_ring);
119 qp_ctx->order_ring = NULL;
128 scheduler_pmd_start(struct rte_cryptodev *dev)
130 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
134 if (dev->data->dev_started)
137 /* although scheduler_attach_init_slave presents multiple times,
138 * there will be only 1 meaningful execution.
140 ret = scheduler_attach_init_slave(dev);
144 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
145 ret = update_order_ring(dev, i);
147 CS_LOG_ERR("Failed to update reorder buffer");
152 if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
153 CS_LOG_ERR("Scheduler mode is not set");
157 if (!sched_ctx->nb_slaves) {
158 CS_LOG_ERR("No slave in the scheduler");
162 RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
164 for (i = 0; i < sched_ctx->nb_slaves; i++) {
165 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
167 if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
168 CS_LOG_ERR("Failed to attach slave");
173 RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
175 if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
176 CS_LOG_ERR("Scheduler start failed");
180 /* start all slaves */
181 for (i = 0; i < sched_ctx->nb_slaves; i++) {
182 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
183 struct rte_cryptodev *slave_dev =
184 rte_cryptodev_pmd_get_dev(slave_dev_id);
186 ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
188 CS_LOG_ERR("Failed to start slave dev %u",
199 scheduler_pmd_stop(struct rte_cryptodev *dev)
201 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
204 if (!dev->data->dev_started)
207 /* stop all slaves first */
208 for (i = 0; i < sched_ctx->nb_slaves; i++) {
209 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
210 struct rte_cryptodev *slave_dev =
211 rte_cryptodev_pmd_get_dev(slave_dev_id);
213 (*slave_dev->dev_ops->dev_stop)(slave_dev);
216 if (*sched_ctx->ops.scheduler_stop)
217 (*sched_ctx->ops.scheduler_stop)(dev);
219 for (i = 0; i < sched_ctx->nb_slaves; i++) {
220 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
222 if (*sched_ctx->ops.slave_detach)
223 (*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
229 scheduler_pmd_close(struct rte_cryptodev *dev)
231 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
235 /* the dev should be stopped before being closed */
236 if (dev->data->dev_started)
239 /* close all slaves first */
240 for (i = 0; i < sched_ctx->nb_slaves; i++) {
241 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
242 struct rte_cryptodev *slave_dev =
243 rte_cryptodev_pmd_get_dev(slave_dev_id);
245 ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
250 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
251 struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
253 if (qp_ctx->order_ring) {
254 rte_ring_free(qp_ctx->order_ring);
255 qp_ctx->order_ring = NULL;
258 if (qp_ctx->private_qp_ctx) {
259 rte_free(qp_ctx->private_qp_ctx);
260 qp_ctx->private_qp_ctx = NULL;
264 if (sched_ctx->private_ctx)
265 rte_free(sched_ctx->private_ctx);
267 if (sched_ctx->capabilities)
268 rte_free(sched_ctx->capabilities);
273 /** Get device statistics */
275 scheduler_pmd_stats_get(struct rte_cryptodev *dev,
276 struct rte_cryptodev_stats *stats)
278 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
281 for (i = 0; i < sched_ctx->nb_slaves; i++) {
282 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
283 struct rte_cryptodev *slave_dev =
284 rte_cryptodev_pmd_get_dev(slave_dev_id);
285 struct rte_cryptodev_stats slave_stats = {0};
287 (*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
289 stats->enqueued_count += slave_stats.enqueued_count;
290 stats->dequeued_count += slave_stats.dequeued_count;
292 stats->enqueue_err_count += slave_stats.enqueue_err_count;
293 stats->dequeue_err_count += slave_stats.dequeue_err_count;
297 /** Reset device statistics */
299 scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
301 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
304 for (i = 0; i < sched_ctx->nb_slaves; i++) {
305 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
306 struct rte_cryptodev *slave_dev =
307 rte_cryptodev_pmd_get_dev(slave_dev_id);
309 (*slave_dev->dev_ops->stats_reset)(slave_dev);
313 /** Get device info */
315 scheduler_pmd_info_get(struct rte_cryptodev *dev,
316 struct rte_cryptodev_info *dev_info)
318 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
319 uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
320 UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
326 /* although scheduler_attach_init_slave presents multiple times,
327 * there will be only 1 meaningful execution.
329 scheduler_attach_init_slave(dev);
331 for (i = 0; i < sched_ctx->nb_slaves; i++) {
332 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
333 struct rte_cryptodev_info slave_info;
335 rte_cryptodev_info_get(slave_dev_id, &slave_info);
336 max_nb_sessions = slave_info.sym.max_nb_sessions <
338 slave_info.sym.max_nb_sessions :
342 dev_info->driver_id = dev->driver_id;
343 dev_info->feature_flags = dev->feature_flags;
344 dev_info->capabilities = sched_ctx->capabilities;
345 dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
346 dev_info->sym.max_nb_sessions = max_nb_sessions;
349 /** Release queue pair */
351 scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
353 struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
358 if (qp_ctx->order_ring)
359 rte_ring_free(qp_ctx->order_ring);
360 if (qp_ctx->private_qp_ctx)
361 rte_free(qp_ctx->private_qp_ctx);
364 dev->data->queue_pairs[qp_id] = NULL;
369 /** Setup a queue pair */
371 scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
372 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
373 struct rte_mempool *session_pool)
375 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
376 struct scheduler_qp_ctx *qp_ctx;
377 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
381 if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
382 "CRYTO_SCHE PMD %u QP %u",
383 dev->data->dev_id, qp_id) < 0) {
384 CS_LOG_ERR("Failed to create unique queue pair name");
388 /* Free memory prior to re-allocation if needed. */
389 if (dev->data->queue_pairs[qp_id] != NULL)
390 scheduler_pmd_qp_release(dev, qp_id);
392 for (i = 0; i < sched_ctx->nb_slaves; i++) {
393 uint8_t slave_id = sched_ctx->slaves[i].dev_id;
396 * All slaves will share the same session mempool
397 * for session-less operations, so the objects
398 * must be big enough for all the drivers used.
400 ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
401 qp_conf, socket_id, session_pool);
406 /* Allocate the queue pair data structure. */
407 qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
412 /* The actual available object number = nb_descriptors - 1 */
413 qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
415 dev->data->queue_pairs[qp_id] = qp_ctx;
417 /* although scheduler_attach_init_slave presents multiple times,
418 * there will be only 1 meaningful execution.
420 ret = scheduler_attach_init_slave(dev);
422 CS_LOG_ERR("Failed to attach slave");
423 scheduler_pmd_qp_release(dev, qp_id);
427 if (*sched_ctx->ops.config_queue_pair) {
428 if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
429 CS_LOG_ERR("Unable to configure queue pair");
437 /** Start queue pair */
439 scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
440 __rte_unused uint16_t queue_pair_id)
445 /** Stop queue pair */
447 scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
448 __rte_unused uint16_t queue_pair_id)
453 /** Return the number of allocated queue pairs */
455 scheduler_pmd_qp_count(struct rte_cryptodev *dev)
457 return dev->data->nb_queue_pairs;
461 scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
463 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
465 uint32_t max_priv_sess_size = 0;
467 /* Check what is the maximum private session size for all slaves */
468 for (i = 0; i < sched_ctx->nb_slaves; i++) {
469 uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
470 struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
471 uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
473 if (max_priv_sess_size < priv_sess_size)
474 max_priv_sess_size = priv_sess_size;
477 return max_priv_sess_size;
481 scheduler_pmd_session_configure(struct rte_cryptodev *dev,
482 struct rte_crypto_sym_xform *xform,
483 struct rte_cryptodev_sym_session *sess,
484 struct rte_mempool *mempool)
486 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
490 for (i = 0; i < sched_ctx->nb_slaves; i++) {
491 struct scheduler_slave *slave = &sched_ctx->slaves[i];
493 ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
496 CS_LOG_ERR("unabled to config sym session");
504 /** Clear the memory of session so it doesn't leave key material behind */
506 scheduler_pmd_session_clear(struct rte_cryptodev *dev,
507 struct rte_cryptodev_sym_session *sess)
509 struct scheduler_ctx *sched_ctx = dev->data->dev_private;
512 /* Clear private data of slaves */
513 for (i = 0; i < sched_ctx->nb_slaves; i++) {
514 struct scheduler_slave *slave = &sched_ctx->slaves[i];
516 rte_cryptodev_sym_session_clear(slave->dev_id, sess);
520 struct rte_cryptodev_ops scheduler_pmd_ops = {
521 .dev_configure = scheduler_pmd_config,
522 .dev_start = scheduler_pmd_start,
523 .dev_stop = scheduler_pmd_stop,
524 .dev_close = scheduler_pmd_close,
526 .stats_get = scheduler_pmd_stats_get,
527 .stats_reset = scheduler_pmd_stats_reset,
529 .dev_infos_get = scheduler_pmd_info_get,
531 .queue_pair_setup = scheduler_pmd_qp_setup,
532 .queue_pair_release = scheduler_pmd_qp_release,
533 .queue_pair_start = scheduler_pmd_qp_start,
534 .queue_pair_stop = scheduler_pmd_qp_stop,
535 .queue_pair_count = scheduler_pmd_qp_count,
537 .session_get_size = scheduler_pmd_session_get_size,
538 .session_configure = scheduler_pmd_session_configure,
539 .session_clear = scheduler_pmd_session_clear,
542 struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;