X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fscheduler%2Fscheduler_multicore.c;h=2d6790bb3b5a50ed75a1ae41ce56cd7ade78b70b;hb=4f86c0ba19ba065c8c32a5b0111efc7bb74c9b96;hp=8757552f7c0fcd59ac1fc8c9bdac0f743f3b44ec;hpb=4c07e0552f0a08ebc5eab61ee26cd4318fde8db0;p=dpdk.git

diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 8757552f7c..2d6790bb3b 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
  */
 #include <unistd.h>
 
@@ -42,13 +14,15 @@
 
 #define MC_SCHED_BUFFER_SIZE 32
 
+#define CRYPTO_OP_STATUS_BIT_COMPLETE	0x80
+
 /** multi-core scheduler context */
 struct mc_scheduler_ctx {
 	uint32_t num_workers;             /**< Number of workers polling */
 	uint32_t stop_signal;
 
-	struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
-	struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+	struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
 };
 
 struct mc_scheduler_qp_ctx {
@@ -137,9 +111,24 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	struct rte_ring *order_ring =
-		((struct scheduler_qp_ctx *)qp)->order_ring;
+			((struct scheduler_qp_ctx *)qp)->order_ring;
+	struct rte_crypto_op *op;
+	uint32_t nb_objs, nb_ops_to_deq;
+
+	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+			nb_ops, NULL);
+	if (nb_objs == 0)
+		return 0;
+
+	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+		op = ops[nb_ops_to_deq];
+		if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
+			break;
+		op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
+	}
 
-	return scheduler_order_drain(order_ring, ops, nb_ops);
+	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+	return nb_ops_to_deq;
 }
 
 static int
@@ -168,11 +157,13 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 	struct scheduler_slave *slave;
 	struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
 	struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
-	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
 	uint16_t processed_ops;
-	uint16_t left_op = 0;
-	uint16_t left_op_idx = 0;
+	uint16_t pending_enq_ops = 0;
+	uint16_t pending_enq_ops_idx = 0;
+	uint16_t pending_deq_ops = 0;
+	uint16_t pending_deq_ops_idx = 0;
 	uint16_t inflight_ops = 0;
+	const uint8_t reordering_enabled = sched_ctx->reordering_enabled;
 
 	for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
 		if (sched_ctx->wc_pool[i] == core_id) {
@@ -181,7 +172,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 		}
 	}
 	if (worker_idx == -1) {
-		CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+		CR_SCHED_LOG(ERR, "worker on core %u:cannot find worker index!",
+			core_id);
 		return -1;
 	}
 
@@ -190,75 +182,51 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 	deq_ring = mc_ctx->sched_deq_ring[worker_idx];
 
 	while (!mc_ctx->stop_signal) {
-		if (left_op) {
+		if (pending_enq_ops) {
 			processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
-					slave->qp_id,
-					&enq_ops[left_op_idx], left_op);
-
-			left_op -= processed_ops;
-			left_op_idx += processed_ops;
+				slave->qp_id, &enq_ops[pending_enq_ops_idx],
+				pending_enq_ops);
+			pending_enq_ops -= processed_ops;
+			pending_enq_ops_idx += processed_ops;
+			inflight_ops += processed_ops;
 		} else {
-			uint16_t nb_deq_ops = rte_ring_dequeue_burst(enq_ring,
-				(void *)enq_ops, MC_SCHED_BUFFER_SIZE, NULL);
-
-			if (nb_deq_ops) {
-				uint16_t i;
-
-				for (i = 0; i < nb_deq_ops && i < 4; i++)
-					rte_prefetch0(enq_ops[i]->sym->session);
-
-				for (i = 0; (i < (nb_deq_ops - 8))
-						&& (nb_deq_ops > 8); i += 4) {
-					sess0 = (struct scheduler_session *)
-						enq_ops[i]->sym->session->_private;
-					sess1 = (struct scheduler_session *)
-						enq_ops[i+1]->sym->session->_private;
-					sess2 = (struct scheduler_session *)
-						enq_ops[i+2]->sym->session->_private;
-					sess3 = (struct scheduler_session *)
-						enq_ops[i+3]->sym->session->_private;
-
-					enq_ops[i]->sym->session =
-						sess0->sessions[worker_idx];
-					enq_ops[i + 1]->sym->session =
-						sess1->sessions[worker_idx];
-					enq_ops[i + 2]->sym->session =
-						sess2->sessions[worker_idx];
-					enq_ops[i + 3]->sym->session =
-						sess3->sessions[worker_idx];
-
-					rte_prefetch0(enq_ops[i + 4]->sym->session);
-					rte_prefetch0(enq_ops[i + 5]->sym->session);
-					rte_prefetch0(enq_ops[i + 6]->sym->session);
-					rte_prefetch0(enq_ops[i + 7]->sym->session);
-				}
-
-				for (; i < nb_deq_ops; i++) {
-					sess0 = (struct scheduler_session *)
-						enq_ops[i]->sym->session->_private;
-					enq_ops[i]->sym->session =
-						sess0->sessions[worker_idx];
-				}
-
-				processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
-					slave->qp_id, enq_ops, nb_deq_ops);
-
-				if (unlikely(processed_ops < nb_deq_ops)) {
-					left_op = nb_deq_ops - processed_ops;
-					left_op_idx = processed_ops;
-				}
-
-				inflight_ops += processed_ops;
+			processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
+							MC_SCHED_BUFFER_SIZE, NULL);
+			if (processed_ops) {
+				pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
+						slave->dev_id, slave->qp_id,
+						enq_ops, processed_ops);
+				pending_enq_ops = processed_ops - pending_enq_ops_idx;
+				inflight_ops += pending_enq_ops_idx;
 			}
 		}
 
-		if (inflight_ops > 0) {
+		if (pending_deq_ops) {
+			processed_ops = rte_ring_enqueue_burst(
+					deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
+					pending_deq_ops, NULL);
+			pending_deq_ops -= processed_ops;
+			pending_deq_ops_idx += processed_ops;
+		} else if (inflight_ops) {
 			processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
 					slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
 
 			if (processed_ops) {
-				uint16_t nb_enq_ops = rte_ring_enqueue_burst(deq_ring,
-						(void *)deq_ops, processed_ops, NULL);
-				inflight_ops -= nb_enq_ops;
+				inflight_ops -= processed_ops;
+				if (reordering_enabled) {
+					uint16_t j;
+
+					for (j = 0; j < processed_ops; j++) {
+						deq_ops[j]->status |=
+							CRYPTO_OP_STATUS_BIT_COMPLETE;
+					}
+				} else {
+					pending_deq_ops_idx = rte_ring_enqueue_burst(
+						deq_ring, (void *)deq_ops, processed_ops,
+						NULL);
+					pending_deq_ops = processed_ops -
+						pending_deq_ops_idx;
+				}
 			}
 		}
 
@@ -319,10 +287,11 @@ scheduler_stop(struct rte_cryptodev *dev)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+	uint16_t i;
 
 	mc_ctx->stop_signal = 1;
 
-	for (uint16_t i = 0; i < sched_ctx->nb_wc; i++)
+	for (i = 0; i < sched_ctx->nb_wc; i++)
 		rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
 
 	return 0;
@@ -339,7 +308,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
 	mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
 			rte_socket_id());
 	if (!mc_qp_ctx) {
-		CS_LOG_ERR("failed allocate memory for private queue pair");
+		CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
 		return -ENOMEM;
 	}
 
@@ -354,44 +323,70 @@ static int
 scheduler_create_private_ctx(struct rte_cryptodev *dev)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-	struct mc_scheduler_ctx *mc_ctx;
+	struct mc_scheduler_ctx *mc_ctx = NULL;
+	uint16_t i;
 
-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}
 
 	mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
 			rte_socket_id());
 	if (!mc_ctx) {
-		CS_LOG_ERR("failed allocate memory");
+		CR_SCHED_LOG(ERR, "failed allocate memory");
 		return -ENOMEM;
 	}
 
 	mc_ctx->num_workers = sched_ctx->nb_wc;
-	for (uint16_t i = 0; i < sched_ctx->nb_wc; i++) {
+	for (i = 0; i < sched_ctx->nb_wc; i++) {
 		char r_name[16];
 
-		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
-		mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-					rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+				"%u_%u", dev->data->dev_id, i);
+		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_enq_ring[i]) {
-			CS_LOG_ERR("Cannot create ring for worker %u", i);
-			return -1;
+			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+						PER_SLAVE_BUFF_SIZE,
+						rte_socket_id(),
+						RING_F_SC_DEQ | RING_F_SP_ENQ);
+			if (!mc_ctx->sched_enq_ring[i]) {
+				CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+					i);
+				goto exit;
+			}
 		}
-		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
-		mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-					rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+				"%u_%u", dev->data->dev_id, i);
+		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_deq_ring[i]) {
-			CS_LOG_ERR("Cannot create ring for worker %u", i);
-			return -1;
+			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+						PER_SLAVE_BUFF_SIZE,
+						rte_socket_id(),
+						RING_F_SC_DEQ | RING_F_SP_ENQ);
+			if (!mc_ctx->sched_deq_ring[i]) {
+				CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+					i);
+				goto exit;
+			}
 		}
 	}
 
 	sched_ctx->private_ctx = (void *)mc_ctx;
 
 	return 0;
+
+exit:
+	for (i = 0; i < sched_ctx->nb_wc; i++) {
+		rte_ring_free(mc_ctx->sched_enq_ring[i]);
+		rte_ring_free(mc_ctx->sched_deq_ring[i]);
+	}
+	rte_free(mc_ctx);
+
+	return -1;
 }
 
-struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
 	slave_attach,
 	slave_detach,
 	scheduler_start,
@@ -402,11 +397,11 @@ struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
 	NULL	/* option_get */
 };
 
-struct rte_cryptodev_scheduler mc_scheduler = {
+static struct rte_cryptodev_scheduler mc_scheduler = {
 		.name = "multicore-scheduler",
 		.description = "scheduler which will run burst across multiple cpu cores",
 		.mode = CDEV_SCHED_MODE_MULTICORE,
 		.ops = &scheduler_mc_ops
 };
 
-struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;
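
The core of this diff is the new ordered-dequeue path: a worker sets a spare status bit (CRYPTO_OP_STATUS_BIT_COMPLETE, 0x80) on each op once the underlying device finishes it, and schedule_dequeue_ordering() walks the order ring front-to-back, releasing ops only up to the first one still missing the bit, so completions never overtake submission order. Below is a minimal self-contained C sketch of that drain pattern; the op struct, the fixed-size queue, and all names are hypothetical stand-ins for illustration, not the DPDK API.

#include <stdint.h>
#include <stdio.h>

#define OP_COMPLETE_BIT 0x80	/* spare status bit, as in the patch */

struct op { uint8_t status; int id; };

/* Hypothetical in-order queue: ops sit in submission order. */
struct order_queue { struct op *slots[8]; unsigned head; unsigned tail; };

/* Drain at most nb ops, stopping at the first incomplete one. This
 * mirrors schedule_dequeue_ordering(): peek at the queued ops, scan
 * while the completion bit is set, then commit the consumed count
 * (the rte_ring_dequeue_burst_start()/rte_ring_dequeue_finish() pair
 * in the real code). */
static unsigned
ordered_drain(struct order_queue *q, struct op **out, unsigned nb)
{
	unsigned n = 0;

	while (n < nb && q->head != q->tail) {
		struct op *op = q->slots[q->head & 7];

		if (!(op->status & OP_COMPLETE_BIT))
			break;			/* preserve submission order */

		op->status &= (uint8_t)~OP_COMPLETE_BIT; /* clear before handoff */
		out[n++] = op;
		q->head++;			/* commit, like dequeue_finish */
	}
	return n;
}

int main(void)
{
	struct op ops[3] = { {0, 0}, {0, 1}, {0, 2} };
	struct order_queue q = { { &ops[0], &ops[1], &ops[2] }, 0, 3 };
	struct op *out[8];

	ops[0].status |= OP_COMPLETE_BIT;
	ops[2].status |= OP_COMPLETE_BIT;	/* finished out of order */

	/* Only op 0 is released; op 2 waits behind unfinished op 1. */
	printf("drained %u ops\n", ordered_drain(&q, out, 8));
	return 0;
}

The same idea explains why the dequeue hunk in mc_scheduler_worker() merely tags ops with the bit when reordering is enabled instead of pushing them to the dequeue ring: the front-end drain restores order, so workers can complete ops in any order without coordinating with each other.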