X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fscheduler%2Frte_cryptodev_scheduler.c;h=9367a0e915d07fc971879caeb02c7fea3f8f510b;hb=c1d4e9d37abdc6c07a05f7d96928e624fea9ebb5;hp=31309c5620d7a716528baa64c8427b17a2cf1ae5;hpb=7b46f62b8159dc5444cdcd48a46c6656b221510c;p=dpdk.git

diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 31309c5620..9367a0e915 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -1,34 +1,7 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
  */
+#include <rte_string_fns.h>
 #include <rte_reorder.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
@@ -40,31 +13,31 @@
 /** update the scheduler pmd's capability with attaching device's
  * capability.
  * For each device to be attached, the scheduler's capability should be
- * the common capability set of all slaves
+ * the common capability set of all workers
  **/
 static uint32_t
 sync_caps(struct rte_cryptodev_capabilities *caps,
 		uint32_t nb_caps,
-		const struct rte_cryptodev_capabilities *slave_caps)
+		const struct rte_cryptodev_capabilities *worker_caps)
 {
-	uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
+	uint32_t sync_nb_caps = nb_caps, nb_worker_caps = 0;
 	uint32_t i;
 
-	while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
-		nb_slave_caps++;
+	while (worker_caps[nb_worker_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+		nb_worker_caps++;
 
 	if (nb_caps == 0) {
-		rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
-		return nb_slave_caps;
+		rte_memcpy(caps, worker_caps, sizeof(*caps) * nb_worker_caps);
+		return nb_worker_caps;
 	}
 
 	for (i = 0; i < sync_nb_caps; i++) {
 		struct rte_cryptodev_capabilities *cap = &caps[i];
 		uint32_t j;
 
-		for (j = 0; j < nb_slave_caps; j++) {
+		for (j = 0; j < nb_worker_caps; j++) {
 			const struct rte_cryptodev_capabilities *s_cap =
-					&slave_caps[j];
+					&worker_caps[j];
 
 			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
 					cap->sym.xform_type)
@@ -99,7 +72,7 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
 				break;
 		}
 
-		if (j < nb_slave_caps)
+		if (j < nb_worker_caps)
 			continue;
 
 		/* remove a uncommon cap from the array */
@@ -119,13 +92,15 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
 	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
 	uint32_t nb_caps = 0, i;
 
-	if (sched_ctx->capabilities)
+	if (sched_ctx->capabilities) {
 		rte_free(sched_ctx->capabilities);
+		sched_ctx->capabilities = NULL;
+	}
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
 		struct rte_cryptodev_info dev_info;
 
-		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
 
 		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
 		if (nb_caps == 0)
@@ -152,10 +127,10 @@ update_scheduler_feature_flag(struct rte_cryptodev *dev)
 
 	dev->feature_flags = 0;
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
 		struct rte_cryptodev_info dev_info;
 
-		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
 
 		dev->feature_flags |= dev_info.feature_flags;
 	}
@@ -167,15 +142,15 @@ update_max_nb_qp(struct scheduler_ctx *sched_ctx)
 	uint32_t i;
 	uint32_t max_nb_qp;
 
-	if (!sched_ctx->nb_slaves)
+	if (!sched_ctx->nb_workers)
 		return;
 
-	max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;
+	max_nb_qp = sched_ctx->nb_workers ? UINT32_MAX : 0;
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
 		struct rte_cryptodev_info dev_info;
 
-		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
 		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
 				dev_info.max_nb_queue_pairs : max_nb_qp;
 	}
@@ -185,56 +160,56 @@ update_max_nb_qp(struct scheduler_ctx *sched_ctx)
 
 /** Attach a device to the scheduler. */
 int
-rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
+rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)
 {
 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
 	struct scheduler_ctx *sched_ctx;
-	struct scheduler_slave *slave;
+	struct scheduler_worker *worker;
 	struct rte_cryptodev_info dev_info;
 	uint32_t i;
 
 	if (!dev) {
-		CS_LOG_ERR("Operation not supported");
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
-	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
-		CS_LOG_ERR("Operation not supported");
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
 	if (dev->data->dev_started) {
-		CS_LOG_ERR("Illegal operation");
+		CR_SCHED_LOG(ERR, "Illegal operation");
 		return -EBUSY;
 	}
 
 	sched_ctx = dev->data->dev_private;
-	if (sched_ctx->nb_slaves >=
-			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
-		CS_LOG_ERR("Too many slaves attached");
+	if (sched_ctx->nb_workers >=
+			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {
+		CR_SCHED_LOG(ERR, "Too many workers attached");
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++)
-		if (sched_ctx->slaves[i].dev_id == slave_id) {
-			CS_LOG_ERR("Slave already added");
+	for (i = 0; i < sched_ctx->nb_workers; i++)
+		if (sched_ctx->workers[i].dev_id == worker_id) {
+			CR_SCHED_LOG(ERR, "Worker already added");
 			return -ENOTSUP;
 		}
 
-	slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
+	worker = &sched_ctx->workers[sched_ctx->nb_workers];
 
-	rte_cryptodev_info_get(slave_id, &dev_info);
+	rte_cryptodev_info_get(worker_id, &dev_info);
 
-	slave->dev_id = slave_id;
-	slave->dev_type = dev_info.dev_type;
-	sched_ctx->nb_slaves++;
+	worker->dev_id = worker_id;
+	worker->driver_id = dev_info.driver_id;
+	sched_ctx->nb_workers++;
 
 	if (update_scheduler_capability(sched_ctx) < 0) {
-		slave->dev_id = 0;
-		slave->dev_type = 0;
-		sched_ctx->nb_slaves--;
+		worker->dev_id = 0;
+		worker->driver_id = 0;
+		sched_ctx->nb_workers--;
 
-		CS_LOG_ERR("capabilities update failed");
+		CR_SCHED_LOG(ERR, "capabilities update failed");
 		return -ENOTSUP;
 	}
 
@@ -246,52 +221,52 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
 }
 
 int
-rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
+rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)
 {
 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
 	struct scheduler_ctx *sched_ctx;
-	uint32_t i, slave_pos;
+	uint32_t i, worker_pos;
 
 	if (!dev) {
-		CS_LOG_ERR("Operation not supported");
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
-	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
-		CS_LOG_ERR("Operation not supported");
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
 	if (dev->data->dev_started) {
-		CS_LOG_ERR("Illegal operation");
+		CR_SCHED_LOG(ERR, "Illegal operation");
 		return -EBUSY;
 	}
 
 	sched_ctx = dev->data->dev_private;
 
-	for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
-		if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
+	for (worker_pos = 0; worker_pos < sched_ctx->nb_workers; worker_pos++)
+		if (sched_ctx->workers[worker_pos].dev_id == worker_id)
 			break;
-	if (slave_pos == sched_ctx->nb_slaves) {
-		CS_LOG_ERR("Cannot find slave");
+	if (worker_pos == sched_ctx->nb_workers) {
+		CR_SCHED_LOG(ERR, "Cannot find worker");
 		return -ENOTSUP;
 	}
 
-	if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
-		CS_LOG_ERR("Failed to detach slave");
+	if (sched_ctx->ops.worker_detach(dev, worker_id) < 0) {
+		CR_SCHED_LOG(ERR, "Failed to detach worker");
 		return -ENOTSUP;
 	}
 
-	for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
-		memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
-				sizeof(struct scheduler_slave));
+	for (i = worker_pos; i < sched_ctx->nb_workers - 1; i++) {
+		memcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1],
+				sizeof(struct scheduler_worker));
 	}
 
-	memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
-			sizeof(struct scheduler_slave));
-	sched_ctx->nb_slaves--;
+	memset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0,
+			sizeof(struct scheduler_worker));
+	sched_ctx->nb_workers--;
 
 	if (update_scheduler_capability(sched_ctx) < 0) {
-		CS_LOG_ERR("capabilities update failed");
+		CR_SCHED_LOG(ERR, "capabilities update failed");
 		return -ENOTSUP;
 	}
@@ -310,17 +285,17 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
 	struct scheduler_ctx *sched_ctx;
 
 	if (!dev) {
-		CS_LOG_ERR("Operation not supported");
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
-	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
-		CS_LOG_ERR("Operation not supported");
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
 	if (dev->data->dev_started) {
-		CS_LOG_ERR("Illegal operation");
+		CR_SCHED_LOG(ERR, "Illegal operation");
 		return -EBUSY;
 	}
 
@@ -332,40 +307,40 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
 	switch (mode) {
 	case CDEV_SCHED_MODE_ROUNDROBIN:
 		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
-				roundrobin_scheduler) < 0) {
-			CS_LOG_ERR("Failed to load scheduler");
+				crypto_scheduler_roundrobin) < 0) {
+			CR_SCHED_LOG(ERR, "Failed to load scheduler");
 			return -1;
 		}
 		break;
 	case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
 		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
-				pkt_size_based_distr_scheduler) < 0) {
-			CS_LOG_ERR("Failed to load scheduler");
+				crypto_scheduler_pkt_size_based_distr) < 0) {
+			CR_SCHED_LOG(ERR, "Failed to load scheduler");
 			return -1;
 		}
 		break;
 	case CDEV_SCHED_MODE_FAILOVER:
 		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
-				failover_scheduler) < 0) {
-			CS_LOG_ERR("Failed to load scheduler");
+				crypto_scheduler_failover) < 0) {
+			CR_SCHED_LOG(ERR, "Failed to load scheduler");
+			return -1;
+		}
+		break;
+	case CDEV_SCHED_MODE_MULTICORE:
+		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+				crypto_scheduler_multicore) < 0) {
+			CR_SCHED_LOG(ERR, "Failed to load scheduler");
 			return -1;
 		}
 		break;
 	default:
-		CS_LOG_ERR("Not yet supported");
+		CR_SCHED_LOG(ERR, "Not yet supported");
 		return -ENOTSUP;
 	}
 
 	return 0;
 }
 
-int
-rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
-		enum rte_cryptodev_scheduler_mode mode)
-{
-	return rte_cryptodev_scheduler_mode_set(scheduler_id, mode);
-}
-
 enum rte_cryptodev_scheduler_mode
 rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
 {
@@ -373,12 +348,12 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
 	struct scheduler_ctx *sched_ctx;
 
 	if (!dev) {
-		CS_LOG_ERR("Operation not supported");
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
-	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
-		CS_LOG_ERR("Operation not supported");
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
@@ -387,12 +362,6 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
 	return sched_ctx->mode;
 }
 
-enum rte_cryptodev_scheduler_mode
-rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id)
-{
-	return rte_cryptodev_scheduler_mode_get(scheduler_id);
-}
-
 int
 rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
 		uint32_t enable_reorder)
@@ -401,17 +370,17 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
 	struct scheduler_ctx *sched_ctx;
 
 	if (!dev) {
-		CS_LOG_ERR("Operation not supported");
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
-	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
-		CS_LOG_ERR("Operation not supported");
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
 	if (dev->data->dev_started) {
-		CS_LOG_ERR("Illegal operation");
+		CR_SCHED_LOG(ERR, "Illegal operation");
 		return -EBUSY;
 	}
 
@@ -429,12 +398,12 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
 	struct scheduler_ctx *sched_ctx;
 
 	if (!dev) {
-		CS_LOG_ERR("Operation not supported");
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
-	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
-		CS_LOG_ERR("Operation not supported");
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
@@ -451,43 +420,60 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
 	struct scheduler_ctx *sched_ctx;
 
 	if (!dev) {
-		CS_LOG_ERR("Operation not supported");
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
-	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
-		CS_LOG_ERR("Operation not supported");
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
 	if (dev->data->dev_started) {
-		CS_LOG_ERR("Illegal operation");
+		CR_SCHED_LOG(ERR, "Illegal operation");
 		return -EBUSY;
 	}
 
 	sched_ctx = dev->data->dev_private;
 
-	strncpy(sched_ctx->name, scheduler->name,
-			RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
-	strncpy(sched_ctx->description, scheduler->description,
-			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
+	if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+		CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+				"%u bytes.", scheduler->name,
+				RTE_CRYPTODEV_NAME_MAX_LEN);
+		return -EINVAL;
+	}
+	strlcpy(sched_ctx->name, scheduler->name, sizeof(sched_ctx->name));
+
+	if (strlen(scheduler->description) >
+			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
+		CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
+				"%u bytes.", scheduler->description,
+				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
+		return -EINVAL;
+	}
+	strlcpy(sched_ctx->description, scheduler->description,
+			sizeof(sched_ctx->description));
 
 	/* load scheduler instance operations functions */
 	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
 	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
 	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
 	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
-	sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
-	sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
+	sched_ctx->ops.worker_attach = scheduler->ops->worker_attach;
+	sched_ctx->ops.worker_detach = scheduler->ops->worker_detach;
+	sched_ctx->ops.option_set = scheduler->ops->option_set;
+	sched_ctx->ops.option_get = scheduler->ops->option_get;
 
-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}
 
 	if (sched_ctx->ops.create_private_ctx) {
 		int ret = (*sched_ctx->ops.create_private_ctx)(dev);
 
 		if (ret < 0) {
-			CS_LOG_ERR("Unable to create scheduler private "
+			CR_SCHED_LOG(ERR, "Unable to create scheduler private "
 					"context");
 			return ret;
 		}
@@ -499,32 +485,96 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
 }
 
 int
-rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers)
 {
 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
 	struct scheduler_ctx *sched_ctx;
-	uint32_t nb_slaves = 0;
+	uint32_t nb_workers = 0;
 
 	if (!dev) {
-		CS_LOG_ERR("Operation not supported");
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
-	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
-		CS_LOG_ERR("Operation not supported");
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
 		return -ENOTSUP;
 	}
 
 	sched_ctx = dev->data->dev_private;
 
-	nb_slaves = sched_ctx->nb_slaves;
+	nb_workers = sched_ctx->nb_workers;
 
-	if (slaves && nb_slaves) {
+	if (workers && nb_workers) {
 		uint32_t i;
 
-		for (i = 0; i < nb_slaves; i++)
-			slaves[i] = sched_ctx->slaves[i].dev_id;
+		for (i = 0; i < nb_workers; i++)
+			workers[i] = sched_ctx->workers[i].dev_id;
 	}
 
-	return (int)nb_slaves;
+	return (int)nb_workers;
 }
+
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+		enum rte_cryptodev_schedule_option_type option_type,
+		void *option)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+
+	if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
+			option_type >= CDEV_SCHED_OPTION_COUNT) {
+		CR_SCHED_LOG(ERR, "Invalid option parameter");
+		return -EINVAL;
+	}
+
+	if (!option) {
+		CR_SCHED_LOG(ERR, "Invalid option parameter");
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started) {
+		CR_SCHED_LOG(ERR, "Illegal operation");
+		return -EBUSY;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);
+
+	return (*sched_ctx->ops.option_set)(dev, option_type, option);
+}
+
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+		enum rte_cryptodev_schedule_option_type option_type,
+		void *option)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+
+	if (!dev) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (!option) {
+		CR_SCHED_LOG(ERR, "Invalid option parameter");
+		return -EINVAL;
+	}
+
+	if (dev->driver_id != cryptodev_scheduler_driver_id) {
+		CR_SCHED_LOG(ERR, "Operation not supported");
+		return -ENOTSUP;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);
+
+	return (*sched_ctx->ops.option_get)(dev, option_type, option);
+}
+
+
+RTE_LOG_REGISTER(scheduler_logtype_driver, pmd.crypto.scheduler, INFO);