/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>	/* memset */

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"
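/*
 * Ops implementation for the crypto scheduler PMD. Most callbacks below
 * fan out to every attached slave cryptodev and aggregate the results.
 */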
/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret = 0;

	/* configure each attached slave device; stop at the first failure */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_configure)(slave_dev);
		if (ret < 0)
			break;
	}

	return ret;
}
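/** Rebuild the reorder buffer of a queue pair to match the current
 *  reordering setting: (re)created when reordering is enabled, freed
 *  otherwise. The buffer is sized per attached slave.
 */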
static int
update_reorder_buff(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char reorder_buff_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE;

		/* drop any stale buffer before creating a new one */
		if (qp_ctx->reorder_buf) {
			rte_reorder_free(qp_ctx->reorder_buf);
			qp_ctx->reorder_buf = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(reorder_buff_name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
				dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->reorder_buf = rte_reorder_create(reorder_buff_name,
				rte_socket_id(), buff_size);
		if (!qp_ctx->reorder_buf) {
			CS_LOG_ERR("failed to create reorder buffer");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->reorder_buf) {
			rte_reorder_free(qp_ctx->reorder_buf);
			qp_ctx->reorder_buf = NULL;
		}
	}

	return 0;
}
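/** Start device: refresh every queue pair's reorder buffer, validate the
 *  scheduling mode and slave list, attach the slaves, start the scheduler
 *  logic, then start each slave device.
 */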
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_reorder_buff(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}
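/** Stop device: slaves are stopped first, then the scheduler logic, then
 *  the slaves are detached, reversing the start sequence.
 */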
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}
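/** Close device: refuses while the device is started; closes every slave,
 *  then frees each queue pair's reorder buffer and private context along
 *  with the scheduler's own private data and capabilities.
 */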
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->reorder_buf) {
			rte_reorder_free(qp_ctx->reorder_buf);
			qp_ctx->reorder_buf = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx)
		rte_free(sched_ctx->private_ctx);

	if (sched_ctx->capabilities)
		rte_free(sched_ctx->capabilities);

	return 0;
}
/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		/* aggregate the counters of all slaves */
		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}
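/* Note on the accumulation above: the rte_cryptodev layer is expected to
 * zero *stats before invoking this op, so the slave counters sum cleanly.
 */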
/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}
/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	/* report the smallest session limit among all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = slave_info.sym.max_nb_sessions <
				max_nb_sessions ?
				slave_info.sym.max_nb_sessions :
				max_nb_sessions;
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}
/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->reorder_buf)
		rte_reorder_free(qp_ctx->reorder_buf);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
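/* Because release tolerates an absent queue pair context, qp_setup below
 * can invoke it unconditionally before re-allocating.
 */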
/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	__rte_unused const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}
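/* qp_conf is deliberately unused in qp_setup: the scheduler owns no crypto
 * queues of its own; the real queue pairs live on the slave devices, which
 * are assumed to be configured separately before being attached.
 */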
/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
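/* Per-queue-pair start/stop is unsupported by design: queue pairs are
 * brought up and torn down with the device as a whole.
 */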
/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the scheduler session structure */
static unsigned
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
		struct rte_crypto_sym_xform *xform,
		struct scheduler_session *sess,
		uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];
		struct rte_cryptodev *dev =
				rte_cryptodev_pmd_get_dev(slave->dev_id);

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create: clear the existing slave session */
			(*dev->dev_ops->session_clear)(dev,
					(void *)sess->sessions[i]);
			sess->sessions[i] = NULL;
		} else {
			if (!create)
				continue;
			/* create a session on the slave device */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}
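/* On a failed slave session creation, config_slave_sess above re-enters
 * itself with create == 0 to clear any slave sessions already created, so
 * a partially configured scheduler session never leaks.
 */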
/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
		void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	/* tear down the slave sessions before wiping the scheduler's copy */
	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}
/** Configure a session on the scheduler and on every slave */
static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to config sym session");
		return NULL;
	}

	return sess;
}
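/*
 * Sketch of typical application usage, assuming the pre-17.08 session API
 * this file is written against:
 *
 *	void *sess = rte_cryptodev_sym_session_create(scheduler_dev_id,
 *			&xform);
 *	// attach sess to rte_crypto_op's and enqueue on the scheduler;
 *	// each slave holds its own copy created by config_slave_sess().
 */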
struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure = scheduler_pmd_config,
		.dev_start = scheduler_pmd_start,
		.dev_stop = scheduler_pmd_stop,
		.dev_close = scheduler_pmd_close,

		.stats_get = scheduler_pmd_stats_get,
		.stats_reset = scheduler_pmd_stats_reset,

		.dev_infos_get = scheduler_pmd_info_get,

		.queue_pair_setup = scheduler_pmd_qp_setup,
		.queue_pair_release = scheduler_pmd_qp_release,
		.queue_pair_start = scheduler_pmd_qp_start,
		.queue_pair_stop = scheduler_pmd_qp_stop,
		.queue_pair_count = scheduler_pmd_qp_count,

		.session_get_size = scheduler_pmd_session_get_size,
		.session_configure = scheduler_pmd_session_configure,
		.session_clear = scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;