X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fscheduler%2Fscheduler_roundrobin.c;h=4a847281ac1ffc2cfd9ea0715d1bc56cf3495ed0;hb=d81734caccade4dc17d24d2ffd8b71244d35a69f;hp=1f5448e9003f30b745653e8a9df31ea7b66f6193;hpb=f106eb44c0e50e0113a7da3da02cb196a09f3f3c;p=dpdk.git

diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 1f5448e900..4a847281ac 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -37,7 +37,7 @@
 #include "scheduler_pmd_private.h"
 
 struct rr_scheduler_qp_ctx {
-	struct scheduler_slave slaves[MAX_SLAVES_NUM];
+	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
 	uint32_t nb_slaves;
 
 	uint32_t last_enq_slave_idx;
@@ -45,15 +45,13 @@ struct rr_scheduler_qp_ctx {
 };
 
 static uint16_t
-schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
 	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
 	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
 	uint16_t i, processed_ops;
-	struct rte_cryptodev_sym_session *sessions[nb_ops];
-	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
 
 	if (unlikely(nb_ops == 0))
 		return 0;
@@ -61,38 +59,6 @@ schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
 	for (i = 0; i < nb_ops && i < 4; i++)
 		rte_prefetch0(ops[i]->sym->session);
 
-	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
-		sess0 = (struct scheduler_session *)
-				ops[i]->sym->session->_private;
-		sess1 = (struct scheduler_session *)
-				ops[i+1]->sym->session->_private;
-		sess2 = (struct scheduler_session *)
-				ops[i+2]->sym->session->_private;
-		sess3 = (struct scheduler_session *)
-				ops[i+3]->sym->session->_private;
-
-		sessions[i] = ops[i]->sym->session;
-		sessions[i + 1] = ops[i + 1]->sym->session;
-		sessions[i + 2] = ops[i + 2]->sym->session;
-		sessions[i + 3] = ops[i + 3]->sym->session;
-
-		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
-		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
-		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-
-		rte_prefetch0(ops[i + 4]->sym->session);
-		rte_prefetch0(ops[i + 5]->sym->session);
-		rte_prefetch0(ops[i + 6]->sym->session);
-		rte_prefetch0(ops[i + 7]->sym->session);
-	}
-
-	for (; i < nb_ops; i++) {
-		sess0 = (struct scheduler_session *)
-				ops[i]->sym->session->_private;
-		ops[i]->sym->session = sess0->sessions[slave_idx];
-	}
-
 	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
 			slave->qp_id, ops, nb_ops);
 
@@ -101,100 +67,31 @@ schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
 	rr_qp_ctx->last_enq_slave_idx += 1;
 	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
 
-	/* recover session if enqueue is failed */
-	if (unlikely(processed_ops < nb_ops)) {
-		for (i = processed_ops; i < nb_ops; i++)
-			ops[i]->sym->session = sessions[i];
-	}
-
 	return processed_ops;
 }
 
 static uint16_t
-schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
-	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			gen_qp_ctx->private_qp_ctx;
-	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
-	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
-	uint16_t i, processed_ops;
-	struct rte_cryptodev_sym_session *sessions[nb_ops];
-	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
-
-	if (unlikely(nb_ops == 0))
-		return 0;
-
-	for (i = 0; i < nb_ops && i < 4; i++) {
-		rte_prefetch0(ops[i]->sym->session);
-		rte_prefetch0(ops[i]->sym->m_src);
-	}
-
-	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
-		sess0 = (struct scheduler_session *)
-				ops[i]->sym->session->_private;
-		sess1 = (struct scheduler_session *)
-				ops[i+1]->sym->session->_private;
-		sess2 = (struct scheduler_session *)
-				ops[i+2]->sym->session->_private;
-		sess3 = (struct scheduler_session *)
-				ops[i+3]->sym->session->_private;
-
-		sessions[i] = ops[i]->sym->session;
-		sessions[i + 1] = ops[i + 1]->sym->session;
-		sessions[i + 2] = ops[i + 2]->sym->session;
-		sessions[i + 3] = ops[i + 3]->sym->session;
-
-		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
-		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
-		ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
-		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
-		ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
-		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-		ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;
-
-		rte_prefetch0(ops[i + 4]->sym->session);
-		rte_prefetch0(ops[i + 4]->sym->m_src);
-		rte_prefetch0(ops[i + 5]->sym->session);
-		rte_prefetch0(ops[i + 5]->sym->m_src);
-		rte_prefetch0(ops[i + 6]->sym->session);
-		rte_prefetch0(ops[i + 6]->sym->m_src);
-		rte_prefetch0(ops[i + 7]->sym->session);
-		rte_prefetch0(ops[i + 7]->sym->m_src);
-	}
-
-	for (; i < nb_ops; i++) {
-		sess0 = (struct scheduler_session *)
-				ops[i]->sym->session->_private;
-		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
-	}
-
-	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
-
-	slave->nb_inflight_cops += processed_ops;
-
-	rr_qp_ctx->last_enq_slave_idx += 1;
-	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
+	struct rte_ring *order_ring =
+			((struct scheduler_qp_ctx *)qp)->order_ring;
+	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+			nb_ops);
+	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+			nb_ops_to_enq);
 
-	/* recover session if enqueue is failed */
-	if (unlikely(processed_ops < nb_ops)) {
-		for (i = processed_ops; i < nb_ops; i++)
-			ops[i]->sym->session = sessions[i];
-	}
+	scheduler_order_insert(order_ring, ops, nb_ops_enqd);
 
-	return processed_ops;
+	return nb_ops_enqd;
 }
 
 static uint16_t
-schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
 	struct scheduler_slave *slave;
 	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
 	uint16_t nb_deq_ops;
@@ -228,108 +125,15 @@ schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
 }
 
 static uint16_t
-schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
-	struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx);
-	struct scheduler_slave *slave;
-	struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
-	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
-	uint16_t nb_deq_ops, nb_drained_mbufs;
-	const uint16_t nb_op_ops = nb_ops;
-	struct rte_crypto_op *op_ops[nb_op_ops];
-	struct rte_mbuf *reorder_mbufs[nb_op_ops];
-	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
-	uint16_t i;
-
-	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
-		do {
-			last_slave_idx += 1;
+	struct rte_ring *order_ring =
+			((struct scheduler_qp_ctx *)qp)->order_ring;
 
-			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
-				last_slave_idx = 0;
-			/* looped back, means no inflight cops in the queue */
-			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
-				return 0;
-		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
-				== 0);
-	}
-
-	slave = &rr_qp_ctx->slaves[last_slave_idx];
-
-	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
-			slave->qp_id, op_ops, nb_ops);
+	schedule_dequeue(qp, ops, nb_ops);
 
-	rr_qp_ctx->last_deq_slave_idx += 1;
-	rr_qp_ctx->last_deq_slave_idx %= rr_qp_ctx->nb_slaves;
-
-	slave->nb_inflight_cops -= nb_deq_ops;
-
-	for (i = 0; i < nb_deq_ops && i < 4; i++)
-		rte_prefetch0(op_ops[i]->sym->m_src);
-
-	for (i = 0; (i < (nb_deq_ops - 8)) && (nb_deq_ops > 8); i += 4) {
-		mbuf0 = op_ops[i]->sym->m_src;
-		mbuf1 = op_ops[i + 1]->sym->m_src;
-		mbuf2 = op_ops[i + 2]->sym->m_src;
-		mbuf3 = op_ops[i + 3]->sym->m_src;
-
-		mbuf0->userdata = op_ops[i];
-		mbuf1->userdata = op_ops[i + 1];
-		mbuf2->userdata = op_ops[i + 2];
-		mbuf3->userdata = op_ops[i + 3];
-
-		rte_reorder_insert(reorder_buff, mbuf0);
-		rte_reorder_insert(reorder_buff, mbuf1);
-		rte_reorder_insert(reorder_buff, mbuf2);
-		rte_reorder_insert(reorder_buff, mbuf3);
-
-		rte_prefetch0(op_ops[i + 4]->sym->m_src);
-		rte_prefetch0(op_ops[i + 5]->sym->m_src);
-		rte_prefetch0(op_ops[i + 6]->sym->m_src);
-		rte_prefetch0(op_ops[i + 7]->sym->m_src);
-	}
-
-	for (; i < nb_deq_ops; i++) {
-		mbuf0 = op_ops[i]->sym->m_src;
-		mbuf0->userdata = op_ops[i];
-		rte_reorder_insert(reorder_buff, mbuf0);
-	}
-
-	nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
-			nb_ops);
-	for (i = 0; i < nb_drained_mbufs && i < 4; i++)
-		rte_prefetch0(reorder_mbufs[i]);
-
-	for (i = 0; (i < (nb_drained_mbufs - 8)) && (nb_drained_mbufs > 8);
-			i += 4) {
-		ops[i] = *(struct rte_crypto_op **)reorder_mbufs[i]->userdata;
-		ops[i + 1] = *(struct rte_crypto_op **)
-				reorder_mbufs[i + 1]->userdata;
-		ops[i + 2] = *(struct rte_crypto_op **)
-				reorder_mbufs[i + 2]->userdata;
-		ops[i + 3] = *(struct rte_crypto_op **)
-				reorder_mbufs[i + 3]->userdata;
-
-		reorder_mbufs[i]->userdata = NULL;
-		reorder_mbufs[i + 1]->userdata = NULL;
-		reorder_mbufs[i + 2]->userdata = NULL;
-		reorder_mbufs[i + 3]->userdata = NULL;
-
-		rte_prefetch0(reorder_mbufs[i + 4]);
-		rte_prefetch0(reorder_mbufs[i + 5]);
-		rte_prefetch0(reorder_mbufs[i + 6]);
-		rte_prefetch0(reorder_mbufs[i + 7]);
-	}
-
-	for (; i < nb_drained_mbufs; i++) {
-		ops[i] = *(struct rte_crypto_op **)
-				reorder_mbufs[i]->userdata;
-		reorder_mbufs[i]->userdata = NULL;
-	}
-
-	return nb_drained_mbufs;
+	return scheduler_order_drain(order_ring, ops, nb_ops);
 }
 
 static int
@@ -352,13 +156,22 @@ scheduler_start(struct rte_cryptodev *dev)
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	uint16_t i;
 
+	if (sched_ctx->reordering_enabled) {
+		dev->enqueue_burst = &schedule_enqueue_ordering;
+		dev->dequeue_burst = &schedule_dequeue_ordering;
+	} else {
+		dev->enqueue_burst = &schedule_enqueue;
+		dev->dequeue_burst = &schedule_dequeue;
+	}
+
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
 		struct rr_scheduler_qp_ctx *rr_qp_ctx =
 				qp_ctx->private_qp_ctx;
 		uint32_t j;
 
-		memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
+		memset(rr_qp_ctx->slaves, 0,
+				RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
 				sizeof(struct scheduler_slave));
 		for (j = 0; j < sched_ctx->nb_slaves; j++) {
 			rr_qp_ctx->slaves[j].dev_id =
@@ -370,14 +183,6 @@ scheduler_start(struct rte_cryptodev *dev)
 
 		rr_qp_ctx->last_enq_slave_idx = 0;
 		rr_qp_ctx->last_deq_slave_idx = 0;
-
-		if (sched_ctx->reordering_enabled) {
-			qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
-			qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
-		} else {
-			qp_ctx->schedule_enqueue = &schedule_enqueue;
-			qp_ctx->schedule_dequeue = &schedule_dequeue;
-		}
 	}
 
 	return 0;
@@ -419,7 +224,9 @@ struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
-	scheduler_create_private_ctx
+	scheduler_create_private_ctx,
+	NULL,	/* option_set */
+	NULL	/* option_get */
 };
 
 struct rte_cryptodev_scheduler scheduler = {