/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
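
/*
 * Per queue pair round-robin context: the table of slave devices to
 * rotate over, plus independent enqueue and dequeue cursors.
 */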
struct rr_scheduler_qp_ctx {
	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	uint32_t nb_slaves;

	uint32_t last_enq_slave_idx;
	uint32_t last_deq_slave_idx;
};
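
/*
 * Enqueue a burst of crypto ops on the slave currently pointed to by
 * the enqueue cursor, then advance the cursor to the next slave.
 */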
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;

	if (unlikely(nb_ops == 0))
		return 0;

	/* prefetch the first few sessions to warm the cache */
	for (i = 0; i < nb_ops && i < 4; i++)
		rte_prefetch0(ops[i]->sym->session);

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	/* track inflight ops so dequeue knows which slaves to poll */
	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	return processed_ops;
}
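
/*
 * Ordered variant of enqueue: limit the burst to the free space in the
 * order ring, enqueue round-robin, and record the accepted ops so the
 * dequeue side can restore their original order.
 */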
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops, nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}
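
/*
 * Dequeue a burst from the slave under the dequeue cursor. If that
 * slave has no inflight ops, scan forward for one that does; give up
 * after one full loop over the slave table.
 */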
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_slave *slave;
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t nb_deq_ops;

	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;
			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, means no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];
	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	/* advance the dequeue cursor for the next poll */
	last_slave_idx += 1;
	last_slave_idx %= rr_qp_ctx->nb_slaves;
	rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
	slave->nb_inflight_cops -= nb_deq_ops;

	return nb_deq_ops;
}
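
/*
 * Ordered variant of dequeue: poll the slaves so completed ops are
 * marked processed, then drain ops from the order ring strictly in
 * their original submission order.
 */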
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}
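
/*
 * Round-robin keeps all of its state in the per qp context, so there
 * is no work to do when a slave is attached or detached.
 */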
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}
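
/*
 * On device start, select the ordered or plain burst functions and
 * rebuild every queue pair's slave table from the scheduler context.
 */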
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct rr_scheduler_qp_ctx *rr_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(rr_qp_ctx->slaves, 0,
				RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
				sizeof(struct scheduler_slave));
		for (j = 0; j < sched_ctx->nb_slaves; j++) {
			rr_qp_ctx->slaves[j].dev_id =
					sched_ctx->slaves[j].dev_id;
			rr_qp_ctx->slaves[j].qp_id = i;
		}

		rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
		rr_qp_ctx->last_enq_slave_idx = 0;
		rr_qp_ctx->last_deq_slave_idx = 0;
	}

	return 0;
}

/* Nothing to clean up on stop. */
static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
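
/* Allocate the round-robin private context for one queue pair. */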
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct rr_scheduler_qp_ctx *rr_qp_ctx;

	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
			rte_socket_id());
	if (!rr_qp_ctx) {
		CS_LOG_ERR("failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

	return 0;
}

/* Round-robin needs no device-level private context. */
static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
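
/* Hook table picked up by the generic scheduler PMD framework. */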
struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};
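
/* Scheduler descriptor exported via roundrobin_scheduler below. */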
struct rte_cryptodev_scheduler scheduler = {
		.name = "roundrobin-scheduler",
		.description = "scheduler which will round robin burst across "
				"slave crypto devices",
		.mode = CDEV_SCHED_MODE_ROUNDROBIN,
		.ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;