/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
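
/*
 * Per queue pair context for the round-robin mode: the attached slaves
 * plus the cursors that rotate enqueue and dequeue bursts across them.
 */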
struct rr_scheduler_qp_ctx {
	struct scheduler_slave slaves[MAX_SLAVES_NUM];
	uint32_t nb_slaves;

	uint32_t last_enq_slave_idx;
	uint32_t last_deq_slave_idx;
};
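
/*
 * Enqueue a burst to the next slave in line. The whole burst goes to a
 * single slave; fairness comes from rotating the slave index per burst.
 */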
static uint16_t
schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;
	struct rte_cryptodev_sym_session *sessions[nb_ops];
	struct scheduler_session *sess0, *sess1, *sess2, *sess3;

	if (unlikely(nb_ops == 0))
		return 0;
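
	/* Warm the cache: prefetch the first few ops' sessions. */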
	for (i = 0; i < nb_ops && i < 4; i++)
		rte_prefetch0(ops[i]->sym->session);
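
	/*
	 * Main loop, unrolled four ops at a time: save each op's scheduler
	 * session, swap in the chosen slave's session, and prefetch the
	 * next iteration's sessions.
	 */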
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sess1 = (struct scheduler_session *)
				ops[i + 1]->sym->session->_private;
		sess2 = (struct scheduler_session *)
				ops[i + 2]->sym->session->_private;
		sess3 = (struct scheduler_session *)
				ops[i + 3]->sym->session->_private;

		sessions[i] = ops[i]->sym->session;
		sessions[i + 1] = ops[i + 1]->sym->session;
		sessions[i + 2] = ops[i + 2]->sym->session;
		sessions[i + 3] = ops[i + 3]->sym->session;

		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
		ops[i + 3]->sym->session = sess3->sessions[slave_idx];

		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym->session);
	}

	for (; i < nb_ops; i++) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sessions[i] = ops[i]->sym->session;
		ops[i]->sym->session = sess0->sessions[slave_idx];
	}
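
	/* Hand the burst to the selected slave, then advance the
	 * round-robin cursor for the next burst.
	 */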
	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	/* restore the original sessions for any ops the slave rejected */
	if (unlikely(processed_ops < nb_ops)) {
		for (i = processed_ops; i < nb_ops; i++)
			ops[i]->sym->session = sessions[i];
	}

	return processed_ops;
}
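
/*
 * Same as schedule_enqueue(), but additionally stamps every source mbuf
 * with a queue-pair-wide sequence number so the dequeue side can restore
 * the original burst order.
 */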
static uint16_t
schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			gen_qp_ctx->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;
	struct rte_cryptodev_sym_session *sessions[nb_ops];
	struct scheduler_session *sess0, *sess1, *sess2, *sess3;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++) {
		rte_prefetch0(ops[i]->sym->session);
		rte_prefetch0(ops[i]->sym->m_src);
	}
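
	/*
	 * As in schedule_enqueue(), swap in the slave's sessions four ops
	 * at a time; each source mbuf also gets the next sequence number.
	 */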
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sess1 = (struct scheduler_session *)
				ops[i + 1]->sym->session->_private;
		sess2 = (struct scheduler_session *)
				ops[i + 2]->sym->session->_private;
		sess3 = (struct scheduler_session *)
				ops[i + 3]->sym->session->_private;

		sessions[i] = ops[i]->sym->session;
		sessions[i + 1] = ops[i + 1]->sym->session;
		sessions[i + 2] = ops[i + 2]->sym->session;
		sessions[i + 3] = ops[i + 3]->sym->session;

		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
		ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
		ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
		ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;

		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 4]->sym->m_src);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 5]->sym->m_src);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 6]->sym->m_src);
		rte_prefetch0(ops[i + 7]->sym->session);
		rte_prefetch0(ops[i + 7]->sym->m_src);
	}

	for (; i < nb_ops; i++) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sessions[i] = ops[i]->sym->session;
		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
	}

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	/* restore the original sessions for any ops the slave rejected */
	if (unlikely(processed_ops < nb_ops)) {
		for (i = processed_ops; i < nb_ops; i++)
			ops[i]->sym->session = sessions[i];
	}

	return processed_ops;
}
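
/*
 * Dequeue from the slave the dequeue cursor points at; if that slave has
 * nothing in flight, scan forward for the first one that does.
 */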
static uint16_t
schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
	struct scheduler_slave *slave;
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t nb_deq_ops;
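
	/* Skip idle slaves; a full loop back means nothing is in flight. */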
	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;

			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, means no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	last_slave_idx += 1;
	last_slave_idx %= rr_qp_ctx->nb_slaves;

	rr_qp_ctx->last_deq_slave_idx = last_slave_idx;

	slave->nb_inflight_cops -= nb_deq_ops;

	return nb_deq_ops;
}
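
/*
 * Ordering-aware dequeue: pull a burst from the next busy slave, push the
 * ops through a reorder buffer keyed by the enqueue-side sequence numbers,
 * and only return ops that come out of the buffer in the original order.
 */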
static uint16_t
schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
	struct rr_scheduler_qp_ctx *rr_qp_ctx = gen_qp_ctx->private_qp_ctx;
	struct scheduler_slave *slave;
	struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint16_t nb_deq_ops, nb_drained_mbufs;
	const uint16_t nb_op_ops = nb_ops;
	struct rte_crypto_op *op_ops[nb_op_ops];
	struct rte_mbuf *reorder_mbufs[nb_op_ops];
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t i;

	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;

			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, means no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, op_ops, nb_ops);

	rr_qp_ctx->last_deq_slave_idx += 1;
	rr_qp_ctx->last_deq_slave_idx %= rr_qp_ctx->nb_slaves;

	slave->nb_inflight_cops -= nb_deq_ops;
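
	/*
	 * Stash each op pointer in its source mbuf's userdata and insert
	 * the mbuf into the reorder buffer, keyed by its sequence number.
	 */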
	for (i = 0; i < nb_deq_ops && i < 4; i++)
		rte_prefetch0(op_ops[i]->sym->m_src);

	for (i = 0; (i < (nb_deq_ops - 8)) && (nb_deq_ops > 8); i += 4) {
		mbuf0 = op_ops[i]->sym->m_src;
		mbuf1 = op_ops[i + 1]->sym->m_src;
		mbuf2 = op_ops[i + 2]->sym->m_src;
		mbuf3 = op_ops[i + 3]->sym->m_src;

		mbuf0->userdata = op_ops[i];
		mbuf1->userdata = op_ops[i + 1];
		mbuf2->userdata = op_ops[i + 2];
		mbuf3->userdata = op_ops[i + 3];

		rte_reorder_insert(reorder_buff, mbuf0);
		rte_reorder_insert(reorder_buff, mbuf1);
		rte_reorder_insert(reorder_buff, mbuf2);
		rte_reorder_insert(reorder_buff, mbuf3);

		rte_prefetch0(op_ops[i + 4]->sym->m_src);
		rte_prefetch0(op_ops[i + 5]->sym->m_src);
		rte_prefetch0(op_ops[i + 6]->sym->m_src);
		rte_prefetch0(op_ops[i + 7]->sym->m_src);
	}

	for (; i < nb_deq_ops; i++) {
		mbuf0 = op_ops[i]->sym->m_src;
		mbuf0->userdata = op_ops[i];
		rte_reorder_insert(reorder_buff, mbuf0);
	}
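
	/*
	 * Drain whatever is now in order and recover each op pointer with a
	 * plain cast: userdata holds the op pointer itself, so it must not
	 * be dereferenced again.
	 */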
	nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
			nb_ops);
	for (i = 0; i < nb_drained_mbufs && i < 4; i++)
		rte_prefetch0(reorder_mbufs[i]);

	for (i = 0; (i < (nb_drained_mbufs - 8)) && (nb_drained_mbufs > 8);
			i += 4) {
		ops[i] = (struct rte_crypto_op *)reorder_mbufs[i]->userdata;
		ops[i + 1] = (struct rte_crypto_op *)
				reorder_mbufs[i + 1]->userdata;
		ops[i + 2] = (struct rte_crypto_op *)
				reorder_mbufs[i + 2]->userdata;
		ops[i + 3] = (struct rte_crypto_op *)
				reorder_mbufs[i + 3]->userdata;

		reorder_mbufs[i]->userdata = NULL;
		reorder_mbufs[i + 1]->userdata = NULL;
		reorder_mbufs[i + 2]->userdata = NULL;
		reorder_mbufs[i + 3]->userdata = NULL;

		rte_prefetch0(reorder_mbufs[i + 4]);
		rte_prefetch0(reorder_mbufs[i + 5]);
		rte_prefetch0(reorder_mbufs[i + 6]);
		rte_prefetch0(reorder_mbufs[i + 7]);
	}

	for (; i < nb_drained_mbufs; i++) {
		ops[i] = (struct rte_crypto_op *)
				reorder_mbufs[i]->userdata;
		reorder_mbufs[i]->userdata = NULL;
	}

	return nb_drained_mbufs;
}
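
/* Round-robin keeps no per-slave state, so attach/detach are no-ops. */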
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}
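
/*
 * Copy the slave list into every queue pair's private context and select
 * the enqueue/dequeue handlers according to the reordering setting.
 */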
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct rr_scheduler_qp_ctx *rr_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
				sizeof(struct scheduler_slave));
		for (j = 0; j < sched_ctx->nb_slaves; j++) {
			rr_qp_ctx->slaves[j].dev_id =
					sched_ctx->slaves[j].dev_id;
			rr_qp_ctx->slaves[j].qp_id = i;
		}

		rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

		rr_qp_ctx->last_enq_slave_idx = 0;
		rr_qp_ctx->last_deq_slave_idx = 0;

		if (sched_ctx->reordering_enabled) {
			qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
			qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
		} else {
			qp_ctx->schedule_enqueue = &schedule_enqueue;
			qp_ctx->schedule_dequeue = &schedule_dequeue;
		}
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
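
/* Allocate the per queue pair round-robin context on the caller's socket. */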
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct rr_scheduler_qp_ctx *rr_qp_ctx;

	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
			rte_socket_id());
	if (rr_qp_ctx == NULL) {
		CS_LOG_ERR("failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx
};

struct rte_cryptodev_scheduler scheduler = {
		.name = "roundrobin-scheduler",
		.description = "scheduler which will round robin bursts across "
				"slave crypto devices",
		.mode = CDEV_SCHED_MODE_ROUNDROBIN,
		.ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;