/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>

#include "scheduler_pmd_private.h"
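
/*
 * Device operations of the cryptodev scheduler PMD. Most handlers fan the
 * request out to every attached slave device and aggregate the results;
 * scheduler-mode specific work (slave attach/detach, scheduler start/stop,
 * queue pair configuration) is delegated through sched_ctx->ops.
 */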

/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret = 0;

	/* propagate the configuration to every slave device */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_configure)(slave_dev, config);
		if (ret < 0)
			break;
	}

	return ret;
}
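
/*
 * A reorder ring buffers in-flight crypto ops for a queue pair so that
 * results collected from different slaves can be handed back in their
 * original order. It is sized for every slave buffering up to
 * PER_SLAVE_BUFF_SIZE ops, rounded up to the power of two required by
 * rte_ring.
 */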
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id) < 0) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CS_LOG_ERR("failed to create order ring");
			return -ENOMEM;
		}
	} else {
		/* reordering disabled: release any previously created ring */
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}

/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	if (dev->data->dev_started)
		return 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}

/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	if (!dev->data->dev_started)
		return;

	/* stop all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->dev_stop)(slave_dev);
	}

	if (*sched_ctx->ops.scheduler_stop)
		(*sched_ctx->ops.scheduler_stop)(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if (*sched_ctx->ops.slave_detach)
			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
	}
}

/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* the dev should be stopped before being closed */
	if (dev->data->dev_started)
		return -EBUSY;

	/* close all slaves first */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (qp_ctx->private_qp_ctx) {
			rte_free(qp_ctx->private_qp_ctx);
			qp_ctx->private_qp_ctx = NULL;
		}
	}

	if (sched_ctx->private_ctx)
		rte_free(sched_ctx->private_ctx);

	if (sched_ctx->capabilities)
		rte_free(sched_ctx->capabilities);

	return 0;
}

/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
	struct rte_cryptodev_stats *stats)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	/* sum the counters of every slave device */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);
		struct rte_cryptodev_stats slave_stats = {0};

		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);

		stats->enqueued_count += slave_stats.enqueued_count;
		stats->dequeued_count += slave_stats.dequeued_count;

		stats->enqueue_err_count += slave_stats.enqueue_err_count;
		stats->dequeue_err_count += slave_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		(*slave_dev->dev_ops->stats_reset)(slave_dev);
	}
}
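
/*
 * The scheduler can hold no more sessions than its most constrained
 * slave, so the advertised limit is the minimum of all slave limits
 * (or the vdev default while no slave is attached).
 */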

/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = slave_info.sym.max_nb_sessions <
				max_nb_sessions ?
				slave_info.sym.max_nb_sessions :
				max_nb_sessions;
	}

	dev_info->dev_type = dev->dev_type;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}

/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	if (qp_ctx->order_ring)
		rte_ring_free(qp_ctx->order_ring);
	if (qp_ctx->private_qp_ctx)
		rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	__rte_unused const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];

	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"CRYPTO_SCHE PMD %u QP %u",
			dev->data->dev_id, qp_id) < 0) {
		CS_LOG_ERR("Failed to create unique queue pair name");
		return -EFAULT;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		scheduler_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp_ctx == NULL)
		return -ENOMEM;

	dev->data->queue_pairs[qp_id] = qp_ctx;

	if (*sched_ctx->ops.config_queue_pair) {
		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
			CS_LOG_ERR("Unable to configure queue pair");
			return -1;
		}
	}

	return 0;
}

/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the scheduler session structure */
static unsigned
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct scheduler_session);
}
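
/*
 * Create or clear the per-slave sessions backing a scheduler session.
 * With create != 0 a symmetric session is created on every slave that
 * lacks one; on any failure the slave sessions created so far are torn
 * down again. With create == 0, existing slave sessions are cleared.
 */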
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
		struct rte_crypto_sym_xform *xform,
		struct scheduler_session *sess,
		uint32_t create)
{
	uint32_t i;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct scheduler_slave *slave = &sched_ctx->slaves[i];
		struct rte_cryptodev *dev =
				rte_cryptodev_pmd_get_dev(slave->dev_id);

		if (sess->sessions[i]) {
			if (create)
				continue;
			/* !create */
			(*dev->dev_ops->session_clear)(dev,
					(void *)sess->sessions[i]);
			sess->sessions[i] = NULL;
		} else {
			if (!create)
				continue;
			/* create */
			sess->sessions[i] =
					rte_cryptodev_sym_session_create(
							slave->dev_id, xform);
			if (!sess->sessions[i]) {
				config_slave_sess(sched_ctx, NULL, sess, 0);
				return -1;
			}
		}
	}

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
	void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	/* clear the session on every slave, then wipe the local copy */
	config_slave_sess(sched_ctx, NULL, sess, 0);

	memset(sess, 0, sizeof(struct scheduler_session));
}

/** Configure a session on each slave device */
static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform, void *sess)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;

	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
		CS_LOG_ERR("unable to config sym session");
		return NULL;
	}

	return sess;
}

struct rte_cryptodev_ops scheduler_pmd_ops = {
		.dev_configure = scheduler_pmd_config,
		.dev_start = scheduler_pmd_start,
		.dev_stop = scheduler_pmd_stop,
		.dev_close = scheduler_pmd_close,

		.stats_get = scheduler_pmd_stats_get,
		.stats_reset = scheduler_pmd_stats_reset,

		.dev_infos_get = scheduler_pmd_info_get,

		.queue_pair_setup = scheduler_pmd_qp_setup,
		.queue_pair_release = scheduler_pmd_qp_release,
		.queue_pair_start = scheduler_pmd_qp_start,
		.queue_pair_stop = scheduler_pmd_qp_stop,
		.queue_pair_count = scheduler_pmd_qp_count,

		.session_get_size = scheduler_pmd_session_get_size,
		.session_configure = scheduler_pmd_session_configure,
		.session_clear = scheduler_pmd_session_clear,
};

struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
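
/*
 * Illustrative only (not part of the driver): an application never calls
 * these handlers directly; it drives them through the public cryptodev
 * API, which dispatches via the ops table above, e.g.:
 *
 *	struct rte_cryptodev_config conf = { .socket_id = rte_socket_id() };
 *	rte_cryptodev_configure(sched_dev_id, &conf);
 *	rte_cryptodev_start(sched_dev_id);
 *
 * The exact config fields vary between DPDK releases; "sched_dev_id" is a
 * hypothetical device id obtained when the scheduler vdev was created.
 */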