/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_reorder.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
struct rr_scheduler_qp_ctx {
	struct scheduler_slave slaves[MAX_SLAVES_NUM];
	uint32_t nb_slaves;

	uint32_t last_enq_slave_idx;
	uint32_t last_deq_slave_idx;
};
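
/* Enqueue a burst of ops to the current slave, then advance the
 * round-robin index. Each op's scheduler session is swapped for the
 * slave's own session before enqueue, and restored for any op the
 * slave failed to accept.
 */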
static uint16_t
schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;
	struct rte_cryptodev_sym_session *sessions[nb_ops];
	struct scheduler_session *sess0, *sess1, *sess2, *sess3;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++)
		rte_prefetch0(ops[i]->sym->session);

	/* main loop: handle 4 ops per iteration, prefetching 4 ops ahead */
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sess1 = (struct scheduler_session *)
				ops[i+1]->sym->session->_private;
		sess2 = (struct scheduler_session *)
				ops[i+2]->sym->session->_private;
		sess3 = (struct scheduler_session *)
				ops[i+3]->sym->session->_private;

		/* back up the scheduler sessions before overwriting them */
		sessions[i] = ops[i]->sym->session;
		sessions[i + 1] = ops[i + 1]->sym->session;
		sessions[i + 2] = ops[i + 2]->sym->session;
		sessions[i + 3] = ops[i + 3]->sym->session;

		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
		ops[i + 3]->sym->session = sess3->sessions[slave_idx];

		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym->session);
	}

	/* leftover ops, one at a time */
	for (; i < nb_ops; i++) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		/* back up this session too, otherwise the recovery loop
		 * below would restore an uninitialized pointer
		 */
		sessions[i] = ops[i]->sym->session;
		ops[i]->sym->session = sess0->sessions[slave_idx];
	}

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	/* restore the sessions of any ops that failed to enqueue */
	if (unlikely(processed_ops < nb_ops)) {
		for (i = processed_ops; i < nb_ops; i++)
			ops[i]->sym->session = sessions[i];
	}

	return processed_ops;
}
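
/* Same as schedule_enqueue(), but additionally stamps each op's source
 * mbuf with a sequence number so schedule_dequeue_ordering() can restore
 * the original burst order via the reorder buffer.
 */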
static uint16_t
schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			gen_qp_ctx->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;
	struct rte_cryptodev_sym_session *sessions[nb_ops];
	struct scheduler_session *sess0, *sess1, *sess2, *sess3;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++) {
		rte_prefetch0(ops[i]->sym->session);
		rte_prefetch0(ops[i]->sym->m_src);
	}

	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sess1 = (struct scheduler_session *)
				ops[i+1]->sym->session->_private;
		sess2 = (struct scheduler_session *)
				ops[i+2]->sym->session->_private;
		sess3 = (struct scheduler_session *)
				ops[i+3]->sym->session->_private;

		/* back up the scheduler sessions before overwriting them */
		sessions[i] = ops[i]->sym->session;
		sessions[i + 1] = ops[i + 1]->sym->session;
		sessions[i + 2] = ops[i + 2]->sym->session;
		sessions[i + 3] = ops[i + 3]->sym->session;

		/* swap in the slave session and tag the mbuf with the next
		 * sequence number, consumed on the dequeue side
		 */
		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
		ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
		ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
		ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;

		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 4]->sym->m_src);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 5]->sym->m_src);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 6]->sym->m_src);
		rte_prefetch0(ops[i + 7]->sym->session);
		rte_prefetch0(ops[i + 7]->sym->m_src);
	}

	for (; i < nb_ops; i++) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		/* back up this session too, so partial-enqueue recovery
		 * never restores an uninitialized pointer
		 */
		sessions[i] = ops[i]->sym->session;
		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
	}

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	/* restore the sessions of any ops that failed to enqueue */
	if (unlikely(processed_ops < nb_ops)) {
		for (i = processed_ops; i < nb_ops; i++)
			ops[i]->sym->session = sessions[i];
	}

	return processed_ops;
}
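
/* Dequeue from the next slave that has inflight ops, walking the slaves
 * in circular order. Returns 0 if no slave has anything inflight.
 */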
static uint16_t
schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
	struct scheduler_slave *slave;
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t nb_deq_ops;

	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;

			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, meaning no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	last_slave_idx += 1;
	last_slave_idx %= rr_qp_ctx->nb_slaves;

	rr_qp_ctx->last_deq_slave_idx = last_slave_idx;

	slave->nb_inflight_cops -= nb_deq_ops;

	return nb_deq_ops;
}
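
/* Ordered dequeue: collect ops from the next busy slave, insert their
 * source mbufs (tagged with seqn at enqueue time) into the reorder
 * buffer, then drain and return only the ops that are next in sequence.
 */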
static uint16_t
schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
	struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx);
	struct scheduler_slave *slave;
	struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint16_t nb_deq_ops, nb_drained_mbufs;
	const uint16_t nb_op_ops = nb_ops;
	struct rte_crypto_op *op_ops[nb_op_ops];
	struct rte_mbuf *reorder_mbufs[nb_op_ops];
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t i;

	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;

			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, meaning no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, op_ops, nb_ops);

	rr_qp_ctx->last_deq_slave_idx += 1;
	rr_qp_ctx->last_deq_slave_idx %= rr_qp_ctx->nb_slaves;

	slave->nb_inflight_cops -= nb_deq_ops;

	for (i = 0; i < nb_deq_ops && i < 4; i++)
		rte_prefetch0(op_ops[i]->sym->m_src);

	/* attach each op to its mbuf and insert the mbuf into the reorder
	 * buffer, 4 at a time
	 */
	for (i = 0; (i < (nb_deq_ops - 8)) && (nb_deq_ops > 8); i += 4) {
		mbuf0 = op_ops[i]->sym->m_src;
		mbuf1 = op_ops[i + 1]->sym->m_src;
		mbuf2 = op_ops[i + 2]->sym->m_src;
		mbuf3 = op_ops[i + 3]->sym->m_src;

		mbuf0->userdata = op_ops[i];
		mbuf1->userdata = op_ops[i + 1];
		mbuf2->userdata = op_ops[i + 2];
		mbuf3->userdata = op_ops[i + 3];

		rte_reorder_insert(reorder_buff, mbuf0);
		rte_reorder_insert(reorder_buff, mbuf1);
		rte_reorder_insert(reorder_buff, mbuf2);
		rte_reorder_insert(reorder_buff, mbuf3);

		rte_prefetch0(op_ops[i + 4]->sym->m_src);
		rte_prefetch0(op_ops[i + 5]->sym->m_src);
		rte_prefetch0(op_ops[i + 6]->sym->m_src);
		rte_prefetch0(op_ops[i + 7]->sym->m_src);
	}

	for (; i < nb_deq_ops; i++) {
		mbuf0 = op_ops[i]->sym->m_src;
		mbuf0->userdata = op_ops[i];
		rte_reorder_insert(reorder_buff, mbuf0);
	}

	nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
			nb_ops);

	for (i = 0; i < nb_drained_mbufs && i < 4; i++)
		rte_prefetch0(reorder_mbufs[i]);

	for (i = 0; (i < (nb_drained_mbufs - 8)) && (nb_drained_mbufs > 8);
			i += 4) {
		/* userdata holds the op pointer itself, so a plain cast
		 * recovers it; dereferencing as rte_crypto_op ** would read
		 * the op's first bytes instead of the pointer
		 */
		ops[i] = (struct rte_crypto_op *)reorder_mbufs[i]->userdata;
		ops[i + 1] = (struct rte_crypto_op *)
				reorder_mbufs[i + 1]->userdata;
		ops[i + 2] = (struct rte_crypto_op *)
				reorder_mbufs[i + 2]->userdata;
		ops[i + 3] = (struct rte_crypto_op *)
				reorder_mbufs[i + 3]->userdata;

		reorder_mbufs[i]->userdata = NULL;
		reorder_mbufs[i + 1]->userdata = NULL;
		reorder_mbufs[i + 2]->userdata = NULL;
		reorder_mbufs[i + 3]->userdata = NULL;

		rte_prefetch0(reorder_mbufs[i + 4]);
		rte_prefetch0(reorder_mbufs[i + 5]);
		rte_prefetch0(reorder_mbufs[i + 6]);
		rte_prefetch0(reorder_mbufs[i + 7]);
	}

	for (; i < nb_drained_mbufs; i++) {
		ops[i] = (struct rte_crypto_op *)
				reorder_mbufs[i]->userdata;
		reorder_mbufs[i]->userdata = NULL;
	}

	return nb_drained_mbufs;
}
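
/* Round-robin keeps no per-slave state outside the queue pair contexts,
 * so slave attach and detach are no-ops.
 */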
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}
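
/* Populate each queue pair's slave list from the scheduler context and
 * select the ordered or unordered enqueue/dequeue handlers.
 */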
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct rr_scheduler_qp_ctx *rr_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;
		uint16_t qp_id = rr_qp_ctx->slaves[0].qp_id;

		memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
				sizeof(struct scheduler_slave));
		for (j = 0; j < sched_ctx->nb_slaves; j++) {
			/* index by the slave counter j, not the queue pair
			 * counter i, so every slave gets its own dev_id
			 */
			rr_qp_ctx->slaves[j].dev_id =
					sched_ctx->slaves[j].dev_id;
			rr_qp_ctx->slaves[j].qp_id = qp_id;
		}

		rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

		rr_qp_ctx->last_enq_slave_idx = 0;
		rr_qp_ctx->last_deq_slave_idx = 0;

		if (sched_ctx->reordering_enabled) {
			qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
			qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
		} else {
			qp_ctx->schedule_enqueue = &schedule_enqueue;
			qp_ctx->schedule_dequeue = &schedule_dequeue;
		}
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
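
/* Allocate the round-robin private queue pair context on the socket of
 * the calling core and hang it off the generic qp context.
 */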
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct rr_scheduler_qp_ctx *rr_qp_ctx;

	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
			rte_socket_id());
	if (!rr_qp_ctx) {
		CS_LOG_ERR("failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
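
/* Hook the round-robin callbacks into the generic scheduler operations */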
struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx
};

struct rte_cryptodev_scheduler scheduler = {
		.name = "roundrobin-scheduler",
		.description = "scheduler which will round robin burst across "
				"slave crypto devices",
		.mode = CDEV_SCHED_MODE_ROUNDROBIN,
		.ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;