RTE_CRYPTODEV_VDEV_SOCKET_ID
};
-static uint16_t
-scheduler_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct scheduler_qp_ctx *qp_ctx = queue_pair;
- uint16_t processed_ops;
-
- processed_ops = (*qp_ctx->schedule_enqueue)(qp_ctx, ops,
- nb_ops);
-
- return processed_ops;
-}
-
-static uint16_t
-scheduler_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct scheduler_qp_ctx *qp_ctx = queue_pair;
- uint16_t processed_ops;
-
- processed_ops = (*qp_ctx->schedule_dequeue)(qp_ctx, ops,
- nb_ops);
-
- return processed_ops;
-}
-
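The two wrappers removed above existed only to bounce every burst through a per-queue-pair function pointer, so each enqueue or dequeue paid two indirect calls. This patch instead binds the mode-specific handler straight onto the device at start time (see the start hunk further down), leaving a single indirect call on the hot path. A sketch of the dispatch before and after:

	/*
	 * Before (two indirect calls per burst):
	 *   rte_cryptodev_enqueue_burst()
	 *     -> dev->enqueue_burst == scheduler_enqueue_burst()
	 *          -> (*qp_ctx->schedule_enqueue)()   mode-specific handler
	 *
	 * After (one indirect call per burst):
	 *   rte_cryptodev_enqueue_burst()
	 *     -> dev->enqueue_burst == schedule_enqueue() or
	 *        schedule_enqueue_ordering(), bound once at start
	 */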
static int
attach_init_slaves(uint8_t scheduler_id,
const uint8_t *slaves, const uint8_t nb_slaves)
dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
- dev->enqueue_burst = scheduler_enqueue_burst;
- dev->dequeue_burst = scheduler_dequeue_burst;
-
sched_ctx = dev->data->dev_private;
sched_ctx->max_nb_queue_pairs =
init_params->def_p.max_nb_queue_pairs;
};
static uint16_t
-schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
- ((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
}
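The non-ordering path is plain round robin: the whole burst goes to the current slave, then the queue pair's cursor advances. A condensed sketch of how the elided body completes, not the exact code (field names follow the round-robin qp context shown above; nb_slaves is assumed):

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	/* advance the round-robin cursor for the next burst */
	rr_qp_ctx->last_enq_slave_idx = (slave_idx + 1) %
			rr_qp_ctx->nb_slaves;

	return processed_ops;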
static uint16_t
-schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
- struct rr_scheduler_qp_ctx *rr_qp_ctx =
- gen_qp_ctx->private_qp_ctx;
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
sessions[i + 3] = ops[i + 3]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
- ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
ops[i + 1]->sym->session = sess1->sessions[slave_idx];
- ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i + 1]->sym->m_src->seqn = qp_ctx->seqn++;
ops[i + 2]->sym->session = sess2->sessions[slave_idx];
- ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i + 2]->sym->m_src->seqn = qp_ctx->seqn++;
ops[i + 3]->sym->session = sess3->sessions[slave_idx];
- ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i + 3]->sym->m_src->seqn = qp_ctx->seqn++;
rte_prefetch0(ops[i + 4]->sym->session);
rte_prefetch0(ops[i + 4]->sym->m_src);
ops[i]->sym->session->_private;
sessions[i] = ops[i]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
- ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
}
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
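For reference, the 4x-unrolled tagging loop above reduces to this scalar form: each op is pointed at the slave's session, and its source mbuf is stamped with the queue pair's next sequence number, which the ordered dequeue later uses to restore submission order (illustrative sketch; declarations as in the hunk):

	for (i = 0; i < nb_ops; i++) {
		struct scheduler_session *sess = (struct scheduler_session *)
				ops[i]->sym->session->_private;

		ops[i]->sym->session = sess->sessions[slave_idx];
		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
	}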
static uint16_t
-schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
- ((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
struct scheduler_slave *slave;
uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
uint16_t nb_deq_ops;
}
static uint16_t
-schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
- struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx);
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
struct scheduler_slave *slave;
- struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
+ struct rte_reorder_buffer *reorder_buff = qp_ctx->reorder_buf;
struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
uint16_t nb_deq_ops, nb_drained_mbufs;
const uint16_t nb_op_ops = nb_ops;
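This is the counterpart to the tagging above: completed ops come back from the slaves out of order, each source mbuf is inserted into the queue pair's librte_reorder buffer keyed by its seqn, and only the in-order prefix is drained back to the caller. A condensed sketch of that flow (the hunk unrolls the drain 4x over mbuf0..mbuf3; reorder_mbufs is a hypothetical scratch array, and the op is assumed recoverable from its drained mbuf, e.g. via userdata):

	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_op_ops);

	/* park each completed op's mbuf until its turn comes up */
	for (i = 0; i < nb_deq_ops; i++)
		rte_reorder_insert(reorder_buff, ops[i]->sym->m_src);

	/* hand back only the in-order prefix */
	nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
			nb_op_ops);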
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint16_t i;
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
struct rr_scheduler_qp_ctx *rr_qp_ctx =
rr_qp_ctx->last_enq_slave_idx = 0;
rr_qp_ctx->last_deq_slave_idx = 0;
-
- if (sched_ctx->reordering_enabled) {
- qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
- qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
- } else {
- qp_ctx->schedule_enqueue = &schedule_enqueue;
- qp_ctx->schedule_dequeue = &schedule_dequeue;
- }
}
return 0;
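Since the burst handlers are now chosen from sched_ctx->reordering_enabled when the device starts, ordering must be configured before the start call; a change made afterwards only takes effect on the next start. Usage sketch (API names per rte_cryptodev_scheduler.h; error handling trimmed):

	/* pick ordering first: starting the device binds the handlers */
	if (rte_cryptodev_scheduler_ordering_set(scheduler_id, 1) < 0)
		return -1;

	return rte_cryptodev_start(scheduler_id);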