/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
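
/*
 * Per queue pair context for round-robin mode: the snapshot of attached
 * slaves taken at scheduler start, plus the indexes of the slaves that
 * will serve the next enqueue and the next dequeue burst.
 */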
struct rr_scheduler_qp_ctx {
	struct scheduler_slave slaves[MAX_SLAVES_NUM];
	uint32_t nb_slaves;

	uint32_t last_enq_slave_idx;
	uint32_t last_deq_slave_idx;
};
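
/*
 * Enqueue a whole burst to the slave selected by last_enq_slave_idx. Each
 * op arrives with the scheduler's own session, which is saved and replaced
 * by the session created on that slave; the saved sessions are restored
 * for any ops the slave does not accept.
 */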
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;
	struct rte_cryptodev_sym_session *sessions[nb_ops];
	struct scheduler_session *sess0, *sess1, *sess2, *sess3;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++)
		rte_prefetch0(ops[i]->sym->session);
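
	/*
	 * Main loop, unrolled four ops at a time: back up the scheduler
	 * session of each op, swap in the slave's own session and prefetch
	 * the sessions of the following four ops.
	 */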
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sess1 = (struct scheduler_session *)
				ops[i+1]->sym->session->_private;
		sess2 = (struct scheduler_session *)
				ops[i+2]->sym->session->_private;
		sess3 = (struct scheduler_session *)
				ops[i+3]->sym->session->_private;

		sessions[i] = ops[i]->sym->session;
		sessions[i + 1] = ops[i + 1]->sym->session;
		sessions[i + 2] = ops[i + 2]->sym->session;
		sessions[i + 3] = ops[i + 3]->sym->session;

		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
		ops[i + 3]->sym->session = sess3->sessions[slave_idx];

		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym->session);
	}
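
	/* handle the remaining ops one at a time */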
	for (; i < nb_ops; i++) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sessions[i] = ops[i]->sym->session;
		ops[i]->sym->session = sess0->sessions[slave_idx];
	}

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	/* recover the original sessions if the enqueue failed */
	if (unlikely(processed_ops < nb_ops)) {
		for (i = processed_ops; i < nb_ops; i++)
			ops[i]->sym->session = sessions[i];
	}

	return processed_ops;
}
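
/*
 * Ordering-aware enqueue: only as many ops as the order ring can still
 * track are enqueued, and every enqueued op is inserted into the ring so
 * the dequeue side can drain them in their original order.
 */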
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops, nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}
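
/*
 * Dequeue from the slave pointed to by last_deq_slave_idx. If that slave
 * has nothing inflight, walk the slave list once looking for one that
 * does; if none is found the queue pair is idle and zero is returned.
 */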
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_slave *slave;
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t nb_deq_ops;

	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;
			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, means no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];
	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	last_slave_idx += 1;
	last_slave_idx %= rr_qp_ctx->nb_slaves;
	rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
	slave->nb_inflight_cops -= nb_deq_ops;

	return nb_deq_ops;
}
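
/*
 * Ordering-aware dequeue: pull finished ops from the slaves into the order
 * ring, then drain only those that are next in the original enqueue order.
 */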
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);
	return scheduler_order_drain(order_ring, ops, nb_ops);
}
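
/* Round-robin mode keeps no per-slave state, so attach/detach are no-ops. */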
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}
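
/*
 * Copy the scheduler's slave list into every queue pair context and pick
 * the ordered or unordered burst functions, depending on whether crypto op
 * reordering is enabled.
 */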
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct rr_scheduler_qp_ctx *rr_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
				sizeof(struct scheduler_slave));
		for (j = 0; j < sched_ctx->nb_slaves; j++) {
			rr_qp_ctx->slaves[j].dev_id =
					sched_ctx->slaves[j].dev_id;
			rr_qp_ctx->slaves[j].qp_id = i;
		}

		rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
		rr_qp_ctx->last_enq_slave_idx = 0;
		rr_qp_ctx->last_deq_slave_idx = 0;
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
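
/* Allocate this mode's private context for one queue pair. */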
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct rr_scheduler_qp_ctx *rr_qp_ctx;

	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
			rte_socket_id());
	if (!rr_qp_ctx) {
		CS_LOG_ERR("failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;
	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
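
/*
 * Mode entry points handed to the generic scheduler PMD; the positional
 * initializers follow the member order of struct rte_cryptodev_scheduler_ops.
 */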
struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx
};

struct rte_cryptodev_scheduler scheduler = {
		.name = "roundrobin-scheduler",
		.description = "scheduler which will round robin burst across "
				"slave crypto devices",
		.mode = CDEV_SCHED_MODE_ROUNDROBIN,
		.ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;