/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"
/** Attach the workers predefined by the scheduler's EAL options */
static int
scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t scheduler_id = dev->data->dev_id;
	int i;
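
	/* Iterate backwards: each successful attach frees the stored name
	 * and decrements nb_init_workers, so reverse order keeps the
	 * remaining indices valid.
	 */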
	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
		const char *dev_name = sched_ctx->init_worker_names[i];
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_named_dev(dev_name);
		int status;

		if (!worker_dev) {
			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
					dev_name);
			return -EINVAL;
		}

		status = rte_cryptodev_scheduler_worker_attach(
				scheduler_id, worker_dev->data->dev_id);
		if (status < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
					worker_dev->data->dev_id);
			return status;
		}

		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
				dev->data->name,
				sched_ctx->init_worker_names[i]);

		rte_free(sched_ctx->init_worker_names[i]);
		sched_ctx->init_worker_names[i] = NULL;

		sched_ctx->nb_init_workers -= 1;
	}

	return 0;
}

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* scheduler_attach_init_worker() is called from multiple entry
	 * points, but only the first call performs the attachment; later
	 * calls are no-ops.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		ret = rte_cryptodev_configure(worker_dev_id, config);
		if (ret < 0)
			break;
	}

	return ret;
}
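
/** (Re)create a queue pair's reorder ring when reordering is enabled;
 * free it otherwise.
 */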
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "failed to create unique reorder buffer name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CR_SCHED_LOG(ERR, "failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}
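
/** Start device */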
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	/* scheduler_attach_init_worker() is called from multiple entry
	 * points, but only the first call performs the attachment; later
	 * calls are no-ops.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CR_SCHED_LOG(ERR, "Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_workers) {
		CR_SCHED_LOG(ERR, "No worker in the scheduler");
		return -1;
	}
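
	/* The active scheduling mode must provide a worker_attach hook;
	 * return -ENOTSUP if it does not.
	 */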
	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.worker_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
			CR_SCHED_LOG(ERR, "Failed to attach worker");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CR_SCHED_LOG(ERR, "Scheduler start failed");
		return -1;
	}

	/* start all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		ret = rte_cryptodev_start(worker_dev_id);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
					worker_dev_id);
			return ret;
		}
	}

	return 0;
}
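
/** Stop device */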
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		rte_cryptodev_stop(worker_dev_id);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;

		if (*sched_ctx->ops.worker_detach)
			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
	}
}
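
/** Close device */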
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all workers first */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);
		struct rte_cryptodev_stats worker_stats = {0};

		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);

		stats->enqueued_count += worker_stats.enqueued_count;
		stats->dequeued_count += worker_stats.dequeued_count;
		stats->enqueue_err_count += worker_stats.enqueue_err_count;
		stats->dequeue_err_count += worker_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				rte_cryptodev_pmd_get_dev(worker_dev_id);

		(*worker_dev->dev_ops->stats_reset)(worker_dev);
	}
}

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* scheduler_attach_init_worker() is called from multiple entry
	 * points, but only the first call performs the attachment; later
	 * calls are no-ops.
	 */
	scheduler_attach_init_worker(dev);
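
	/* Aggregate worker limits: the smallest non-zero session cap and
	 * the largest mbuf headroom/tailroom requirements bound the
	 * scheduler as a whole.
	 */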
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev_info worker_info;

		rte_cryptodev_info_get(worker_dev_id, &worker_info);
		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = dev_max_sess;
		}

		/* Get the max headroom requirement among worker PMDs */
		headroom_sz = worker_info.min_mbuf_headroom_req > headroom_sz ?
				worker_info.min_mbuf_headroom_req : headroom_sz;

		/* Get the max tailroom requirement among worker PMDs */
		tailroom_sz = worker_info.min_mbuf_tailroom_req > tailroom_sz ?
				worker_info.min_mbuf_tailroom_req : tailroom_sz;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint32_t i;
	int ret;

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_id = sched_ctx->workers[i].dev_id;

		/*
		 * All workers will share the same session mempool
		 * for session-less operations, so the objects
		 * must be big enough for all the drivers used.
		 */
		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
				qp_conf, socket_id);
		if (ret < 0)
			return ret;
	}

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	/* The actual available object number = nb_descriptors - 1 */
	qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	/* scheduler_attach_init_worker() is called from multiple entry
	 * points, but only the first call performs the attachment; later
	 * calls are no-ops.
	 */
	ret = scheduler_attach_init_worker(dev);
	if (ret < 0) {
		CR_SCHED_LOG(ERR, "Failed to attach worker");
		scheduler_pmd_qp_release(dev, qp_id);
		return ret;
	}

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CR_SCHED_LOG(ERR, "Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}
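
/** Report the largest private session size among the workers, so a
 * single session object can hold any worker's private data.
 */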
static uint32_t
scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint8_t i = 0;
	uint32_t max_priv_sess_size = 0;

	/* Check what is the maximum private session size for all workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
		struct rte_cryptodev *worker_dev =
				&rte_cryptodevs[worker_dev_id];
		uint32_t priv_sess_size =
			(*worker_dev->dev_ops->sym_session_get_size)(worker_dev);

		if (max_priv_sess_size < priv_sess_size)
			max_priv_sess_size = priv_sess_size;
	}

	return max_priv_sess_size;
}
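
/** Configure the session on every worker, so any of them can process it */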
static int
scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		ret = rte_cryptodev_sym_session_init(worker->dev_id, sess,
					xform, mempool);
		if (ret < 0) {
			CR_SCHED_LOG(ERR, "unable to config sym session");
			return ret;
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* Clear private data of workers */
	for (i = 0; i < sched_ctx->nb_workers; i++) {
		struct scheduler_worker *worker = &sched_ctx->workers[i];

		rte_cryptodev_sym_session_clear(worker->dev_id, sess);
	}
}
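
/** Operation function table exposed to the cryptodev framework */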
static struct rte_cryptodev_ops scheduler_pmd_ops = {
	.dev_configure = scheduler_pmd_config,
	.dev_start = scheduler_pmd_start,
	.dev_stop = scheduler_pmd_stop,
	.dev_close = scheduler_pmd_close,

	.stats_get = scheduler_pmd_stats_get,
	.stats_reset = scheduler_pmd_stats_reset,

	.dev_infos_get = scheduler_pmd_info_get,

	.queue_pair_setup = scheduler_pmd_qp_setup,
	.queue_pair_release = scheduler_pmd_qp_release,

	.sym_session_get_size = scheduler_pmd_sym_session_get_size,
	.sym_session_configure = scheduler_pmd_sym_session_configure,
	.sym_session_clear = scheduler_pmd_sym_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;