X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fscheduler%2Fscheduler_roundrobin.c;h=c7082a64ee67edbb24cb1c3873559037f08ec4bf;hb=177b41ceee6137ede25fc875d36d61c1a3c7acf7;hp=4990c743c0f6c9c196614136fd4cd588500f64ca;hpb=211e27a9c29b623d07ae3d1fb398dcfab8c7d46d;p=dpdk.git

diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 4990c743c0..c7082a64ee 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
 
 #include <rte_cryptodev.h>
@@ -37,7 +9,7 @@
 #include "scheduler_pmd_private.h"
 
 struct rr_scheduler_qp_ctx {
-	struct scheduler_slave slaves[MAX_SLAVES_NUM];
+	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
 	uint32_t nb_slaves;
 
 	uint32_t last_enq_slave_idx;
@@ -52,8 +24,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
 	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
 	uint16_t i, processed_ops;
-	struct rte_cryptodev_sym_session *sessions[nb_ops];
-	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
 
 	if (unlikely(nb_ops == 0))
 		return 0;
@@ -61,39 +31,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	for (i = 0; i < nb_ops && i < 4; i++)
 		rte_prefetch0(ops[i]->sym->session);
 
-	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
-		sess0 = (struct scheduler_session *)
-				ops[i]->sym->session->_private;
-		sess1 = (struct scheduler_session *)
-				ops[i+1]->sym->session->_private;
-		sess2 = (struct scheduler_session *)
-				ops[i+2]->sym->session->_private;
-		sess3 = (struct scheduler_session *)
-				ops[i+3]->sym->session->_private;
-
-		sessions[i] = ops[i]->sym->session;
-		sessions[i + 1] = ops[i + 1]->sym->session;
-		sessions[i + 2] = ops[i + 2]->sym->session;
-		sessions[i + 3] = ops[i + 3]->sym->session;
-
-		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
-		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
-		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-
-		rte_prefetch0(ops[i + 4]->sym->session);
-		rte_prefetch0(ops[i + 5]->sym->session);
-		rte_prefetch0(ops[i + 6]->sym->session);
-		rte_prefetch0(ops[i + 7]->sym->session);
-	}
-
-	for (; i < nb_ops; i++) {
-		sess0 = (struct scheduler_session *)
-				ops[i]->sym->session->_private;
-		sessions[i] = ops[i]->sym->session;
-		ops[i]->sym->session = sess0->sessions[slave_idx];
-	}
-
 	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
 			slave->qp_id, ops, nb_ops);
 
@@ -102,12 +39,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	rr_qp_ctx->last_enq_slave_idx += 1;
 	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
 
-	/* recover session if enqueue is failed */
-	if (unlikely(processed_ops < nb_ops)) {
-		for (i = processed_ops; i < nb_ops; i++)
-			ops[i]->sym->session = sessions[i];
-	}
-
 	return processed_ops;
 }
 
@@ -115,79 +46,16 @@ static uint16_t
 schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct scheduler_qp_ctx *qp_ctx = qp;
-	struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
-	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
-	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
-	uint16_t i, processed_ops;
-	struct rte_cryptodev_sym_session *sessions[nb_ops];
-	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
-
-	if (unlikely(nb_ops == 0))
-		return 0;
-
-	for (i = 0; i < nb_ops && i < 4; i++) {
-		rte_prefetch0(ops[i]->sym->session);
-		rte_prefetch0(ops[i]->sym->m_src);
-	}
-
-	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
-		sess0 = (struct scheduler_session *)
-				ops[i]->sym->session->_private;
-		sess1 = (struct scheduler_session *)
-				ops[i+1]->sym->session->_private;
-		sess2 = (struct scheduler_session *)
-				ops[i+2]->sym->session->_private;
-		sess3 = (struct scheduler_session *)
-				ops[i+3]->sym->session->_private;
-
-		sessions[i] = ops[i]->sym->session;
-		sessions[i + 1] = ops[i + 1]->sym->session;
-		sessions[i + 2] = ops[i + 2]->sym->session;
-		sessions[i + 3] = ops[i + 3]->sym->session;
-
-		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
-		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
-		ops[i + 1]->sym->m_src->seqn = qp_ctx->seqn++;
-		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
-		ops[i + 2]->sym->m_src->seqn = qp_ctx->seqn++;
-		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-		ops[i + 3]->sym->m_src->seqn = qp_ctx->seqn++;
-
-		rte_prefetch0(ops[i + 4]->sym->session);
-		rte_prefetch0(ops[i + 4]->sym->m_src);
-		rte_prefetch0(ops[i + 5]->sym->session);
-		rte_prefetch0(ops[i + 5]->sym->m_src);
-		rte_prefetch0(ops[i + 6]->sym->session);
-		rte_prefetch0(ops[i + 6]->sym->m_src);
-		rte_prefetch0(ops[i + 7]->sym->session);
-		rte_prefetch0(ops[i + 7]->sym->m_src);
-	}
-
-	for (; i < nb_ops; i++) {
-		sess0 = (struct scheduler_session *)
-				ops[i]->sym->session->_private;
-		sessions[i] = ops[i]->sym->session;
-		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
-	}
-
-	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
-
-	slave->nb_inflight_cops += processed_ops;
+	struct rte_ring *order_ring =
+			((struct scheduler_qp_ctx *)qp)->order_ring;
+	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+			nb_ops);
+	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+			nb_ops_to_enq);
 
-	rr_qp_ctx->last_enq_slave_idx += 1;
-	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
+	scheduler_order_insert(order_ring, ops, nb_ops_enqd);
 
-	/* recover session if enqueue is failed */
-	if (unlikely(processed_ops < nb_ops)) {
-		for (i = processed_ops; i < nb_ops; i++)
-			ops[i]->sym->session = sessions[i];
-	}
-
-	return processed_ops;
+	return nb_ops_enqd;
 }
 
 static uint16_t
@@ -232,105 +100,12 @@ static uint16_t
 schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct scheduler_qp_ctx *qp_ctx = (struct scheduler_qp_ctx *)qp;
-	struct rr_scheduler_qp_ctx *rr_qp_ctx = (qp_ctx->private_qp_ctx);
-	struct scheduler_slave *slave;
-	struct rte_reorder_buffer *reorder_buff = qp_ctx->reorder_buf;
-	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
-	uint16_t nb_deq_ops, nb_drained_mbufs;
-	const uint16_t nb_op_ops = nb_ops;
-	struct rte_crypto_op *op_ops[nb_op_ops];
-	struct rte_mbuf *reorder_mbufs[nb_op_ops];
-	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
-	uint16_t i;
-
-	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
-		do {
-			last_slave_idx += 1;
-
-			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
-				last_slave_idx = 0;
-			/* looped back, means no inflight cops in the queue */
-			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
-				return 0;
-		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
-				== 0);
-	}
+	struct rte_ring *order_ring =
+			((struct scheduler_qp_ctx *)qp)->order_ring;
 
-	slave = &rr_qp_ctx->slaves[last_slave_idx];
-
-	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
-			slave->qp_id, op_ops, nb_ops);
-
-	rr_qp_ctx->last_deq_slave_idx += 1;
-	rr_qp_ctx->last_deq_slave_idx %= rr_qp_ctx->nb_slaves;
-
-	slave->nb_inflight_cops -= nb_deq_ops;
-
-	for (i = 0; i < nb_deq_ops && i < 4; i++)
-		rte_prefetch0(op_ops[i]->sym->m_src);
-
-	for (i = 0; (i < (nb_deq_ops - 8)) && (nb_deq_ops > 8); i += 4) {
-		mbuf0 = op_ops[i]->sym->m_src;
-		mbuf1 = op_ops[i + 1]->sym->m_src;
-		mbuf2 = op_ops[i + 2]->sym->m_src;
-		mbuf3 = op_ops[i + 3]->sym->m_src;
-
-		mbuf0->userdata = op_ops[i];
-		mbuf1->userdata = op_ops[i + 1];
-		mbuf2->userdata = op_ops[i + 2];
-		mbuf3->userdata = op_ops[i + 3];
-
-		rte_reorder_insert(reorder_buff, mbuf0);
-		rte_reorder_insert(reorder_buff, mbuf1);
-		rte_reorder_insert(reorder_buff, mbuf2);
-		rte_reorder_insert(reorder_buff, mbuf3);
-
-		rte_prefetch0(op_ops[i + 4]->sym->m_src);
-		rte_prefetch0(op_ops[i + 5]->sym->m_src);
-		rte_prefetch0(op_ops[i + 6]->sym->m_src);
-		rte_prefetch0(op_ops[i + 7]->sym->m_src);
-	}
-
-	for (; i < nb_deq_ops; i++) {
-		mbuf0 = op_ops[i]->sym->m_src;
-		mbuf0->userdata = op_ops[i];
-		rte_reorder_insert(reorder_buff, mbuf0);
-	}
-
-	nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
-			nb_ops);
-	for (i = 0; i < nb_drained_mbufs && i < 4; i++)
-		rte_prefetch0(reorder_mbufs[i]);
-
-	for (i = 0; (i < (nb_drained_mbufs - 8)) && (nb_drained_mbufs > 8);
-			i += 4) {
-		ops[i] = *(struct rte_crypto_op **)reorder_mbufs[i]->userdata;
-		ops[i + 1] = *(struct rte_crypto_op **)
-				reorder_mbufs[i + 1]->userdata;
-		ops[i + 2] = *(struct rte_crypto_op **)
-				reorder_mbufs[i + 2]->userdata;
-		ops[i + 3] = *(struct rte_crypto_op **)
-				reorder_mbufs[i + 3]->userdata;
-
-		reorder_mbufs[i]->userdata = NULL;
-		reorder_mbufs[i + 1]->userdata = NULL;
-		reorder_mbufs[i + 2]->userdata = NULL;
-		reorder_mbufs[i + 3]->userdata = NULL;
-
-		rte_prefetch0(reorder_mbufs[i + 4]);
-		rte_prefetch0(reorder_mbufs[i + 5]);
-		rte_prefetch0(reorder_mbufs[i + 6]);
-		rte_prefetch0(reorder_mbufs[i + 7]);
-	}
-
-	for (; i < nb_drained_mbufs; i++) {
-		ops[i] = *(struct rte_crypto_op **)
-				reorder_mbufs[i]->userdata;
-		reorder_mbufs[i]->userdata = NULL;
-	}
+	schedule_dequeue(qp, ops, nb_ops);
 
-	return nb_drained_mbufs;
+	return scheduler_order_drain(order_ring, ops, nb_ops);
 }
 
 static int
@@ -367,7 +142,8 @@ scheduler_start(struct rte_cryptodev *dev)
 				qp_ctx->private_qp_ctx;
 		uint32_t j;
 
-		memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
+		memset(rr_qp_ctx->slaves, 0,
+				RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
 				sizeof(struct scheduler_slave));
 		for (j = 0; j < sched_ctx->nb_slaves; j++) {
 			rr_qp_ctx->slaves[j].dev_id =
@@ -399,7 +175,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
 	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
 			rte_socket_id());
 	if (!rr_qp_ctx) {
-		CS_LOG_ERR("failed allocate memory for private queue pair");
+		CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
 		return -ENOMEM;
 	}
 
@@ -420,7 +196,9 @@ struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
-	scheduler_create_private_ctx
+	scheduler_create_private_ctx,
+	NULL,	/* option_set */
+	NULL	/* option_get */
 };
 
 struct rte_cryptodev_scheduler scheduler = {
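
Usage sketch (not part of the patch): the round-robin mode above is selected at runtime through the scheduler PMD's public API. The fragment below is a minimal, hedged example of wiring it up; the helper name setup_rr_scheduler and the two slave device ids are illustrative assumptions, while the API calls themselves (rte_vdev_init, rte_cryptodev_scheduler_slave_attach, rte_cryptodev_scheduler_mode_set, rte_cryptodev_scheduler_ordering_set) are the real slave-era scheduler interfaces this file plugs into.

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_scheduler.h>

/*
 * Hypothetical helper: slave0 and slave1 are ids of two cryptodevs
 * that the application has already created and configured elsewhere.
 */
static int
setup_rr_scheduler(uint8_t slave0, uint8_t slave1)
{
	int id;

	/* Instantiate the scheduler PMD as a virtual device. */
	if (rte_vdev_init("crypto_scheduler", "") < 0)
		return -1;

	id = rte_cryptodev_get_dev_id("crypto_scheduler");
	if (id < 0)
		return -1;

	/* Attach the slaves that schedule_enqueue() cycles over. */
	if (rte_cryptodev_scheduler_slave_attach(id, slave0) < 0 ||
			rte_cryptodev_scheduler_slave_attach(id, slave1) < 0)
		return -1;

	/* Select the round-robin mode implemented by this file. */
	if (rte_cryptodev_scheduler_mode_set(id,
			CDEV_SCHED_MODE_ROUNDROBIN) < 0)
		return -1;

	/*
	 * Enable ordering so the schedule_*_ordering() paths are used:
	 * dequeue then drains ops from the order ring in their original
	 * enqueue order.
	 */
	return rte_cryptodev_scheduler_ordering_set(id, 1);
}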