/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
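
/*
 * Per-queue-pair round-robin state: a snapshot of the attached slaves
 * plus the enqueue and dequeue cursors that implement the rotation.
 */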
struct rr_scheduler_qp_ctx {
	struct scheduler_slave slaves[MAX_SLAVES_NUM];
	uint32_t nb_slaves;

	uint32_t last_enq_slave_idx;
	uint32_t last_deq_slave_idx;
};
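
/*
 * Enqueue a burst on the slave selected by the enqueue cursor: each
 * op's scheduler session is swapped for that slave's own session
 * before the burst is submitted, and the cursor then advances.
 */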
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;
	struct rte_cryptodev_sym_session *sessions[nb_ops];
	struct scheduler_session *sess0, *sess1, *sess2, *sess3;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++)
		rte_prefetch0(ops[i]->sym->session);
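
	/*
	 * Main loop, unrolled four ways: back up each op's scheduler
	 * session, substitute the slave's session, and prefetch the
	 * sessions of the ops eight positions ahead.
	 */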
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sess1 = (struct scheduler_session *)
				ops[i + 1]->sym->session->_private;
		sess2 = (struct scheduler_session *)
				ops[i + 2]->sym->session->_private;
		sess3 = (struct scheduler_session *)
				ops[i + 3]->sym->session->_private;

		sessions[i] = ops[i]->sym->session;
		sessions[i + 1] = ops[i + 1]->sym->session;
		sessions[i + 2] = ops[i + 2]->sym->session;
		sessions[i + 3] = ops[i + 3]->sym->session;

		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
		ops[i + 3]->sym->session = sess3->sessions[slave_idx];

		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym->session);
	}

	for (; i < nb_ops; i++) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sessions[i] = ops[i]->sym->session;
		ops[i]->sym->session = sess0->sessions[slave_idx];
	}

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	/* recover the original sessions if the enqueue failed */
	if (unlikely(processed_ops < nb_ops)) {
		for (i = processed_ops; i < nb_ops; i++)
			ops[i]->sym->session = sessions[i];
	}

	return processed_ops;
}
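
/*
 * Ordered variant of schedule_enqueue(): additionally stamps each op's
 * source mbuf with a queue-pair-wide sequence number so that the
 * ordered dequeue path can restore the original submission order.
 */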
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct scheduler_qp_ctx *qp_ctx = qp;
	struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
	uint16_t i, processed_ops;
	struct rte_cryptodev_sym_session *sessions[nb_ops];
	struct scheduler_session *sess0, *sess1, *sess2, *sess3;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++) {
		rte_prefetch0(ops[i]->sym->session);
		rte_prefetch0(ops[i]->sym->m_src);
	}
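
	/*
	 * Unrolled main loop: besides the session swap, assign each
	 * source mbuf the next sequence number for later reordering.
	 */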
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sess1 = (struct scheduler_session *)
				ops[i + 1]->sym->session->_private;
		sess2 = (struct scheduler_session *)
				ops[i + 2]->sym->session->_private;
		sess3 = (struct scheduler_session *)
				ops[i + 3]->sym->session->_private;

		sessions[i] = ops[i]->sym->session;
		sessions[i + 1] = ops[i + 1]->sym->session;
		sessions[i + 2] = ops[i + 2]->sym->session;
		sessions[i + 3] = ops[i + 3]->sym->session;

		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
		ops[i + 1]->sym->m_src->seqn = qp_ctx->seqn++;
		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
		ops[i + 2]->sym->m_src->seqn = qp_ctx->seqn++;
		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
		ops[i + 3]->sym->m_src->seqn = qp_ctx->seqn++;

		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 4]->sym->m_src);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 5]->sym->m_src);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 6]->sym->m_src);
		rte_prefetch0(ops[i + 7]->sym->session);
		rte_prefetch0(ops[i + 7]->sym->m_src);
	}

	for (; i < nb_ops; i++) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sessions[i] = ops[i]->sym->session;
		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
	}

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	slave->nb_inflight_cops += processed_ops;

	rr_qp_ctx->last_enq_slave_idx += 1;
	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

	/* recover the original sessions if the enqueue failed */
	if (unlikely(processed_ops < nb_ops)) {
		for (i = processed_ops; i < nb_ops; i++)
			ops[i]->sym->session = sessions[i];
	}

	return processed_ops;
}
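
/*
 * Dequeue a burst from the slave at the dequeue cursor. If that slave
 * has no inflight ops, scan forward for the next one that does; a full
 * rotation back to the starting slave means nothing is inflight.
 */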
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rr_scheduler_qp_ctx *rr_qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_slave *slave;
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t nb_deq_ops;

	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;

			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, meaning no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);

	last_slave_idx += 1;
	last_slave_idx %= rr_qp_ctx->nb_slaves;

	rr_qp_ctx->last_deq_slave_idx = last_slave_idx;

	slave->nb_inflight_cops -= nb_deq_ops;

	return nb_deq_ops;
}
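
/*
 * Ordered dequeue: pull ops from the current slave into a scratch
 * array, tag each source mbuf with its owning op and insert it into
 * the reorder buffer, then drain the buffer so ops are returned in
 * sequence-number order.
 */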
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct scheduler_qp_ctx *qp_ctx = (struct scheduler_qp_ctx *)qp;
	struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
	struct scheduler_slave *slave;
	struct rte_reorder_buffer *reorder_buff = qp_ctx->reorder_buf;
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint16_t nb_deq_ops, nb_drained_mbufs;
	const uint16_t nb_op_ops = nb_ops;
	struct rte_crypto_op *op_ops[nb_op_ops];
	struct rte_mbuf *reorder_mbufs[nb_op_ops];
	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
	uint16_t i;

	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
		do {
			last_slave_idx += 1;

			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
				last_slave_idx = 0;
			/* looped back, meaning no inflight cops in the queue */
			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
				return 0;
		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
				== 0);
	}

	slave = &rr_qp_ctx->slaves[last_slave_idx];

	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, op_ops, nb_ops);

	rr_qp_ctx->last_deq_slave_idx += 1;
	rr_qp_ctx->last_deq_slave_idx %= rr_qp_ctx->nb_slaves;

	slave->nb_inflight_cops -= nb_deq_ops;

	for (i = 0; i < nb_deq_ops && i < 4; i++)
		rte_prefetch0(op_ops[i]->sym->m_src);
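
	/*
	 * Stash each op in its mbuf's userdata so it can be recovered
	 * after the mbuf emerges from the reorder buffer.
	 */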
	for (i = 0; (i < (nb_deq_ops - 8)) && (nb_deq_ops > 8); i += 4) {
		mbuf0 = op_ops[i]->sym->m_src;
		mbuf1 = op_ops[i + 1]->sym->m_src;
		mbuf2 = op_ops[i + 2]->sym->m_src;
		mbuf3 = op_ops[i + 3]->sym->m_src;

		mbuf0->userdata = op_ops[i];
		mbuf1->userdata = op_ops[i + 1];
		mbuf2->userdata = op_ops[i + 2];
		mbuf3->userdata = op_ops[i + 3];

		rte_reorder_insert(reorder_buff, mbuf0);
		rte_reorder_insert(reorder_buff, mbuf1);
		rte_reorder_insert(reorder_buff, mbuf2);
		rte_reorder_insert(reorder_buff, mbuf3);

		rte_prefetch0(op_ops[i + 4]->sym->m_src);
		rte_prefetch0(op_ops[i + 5]->sym->m_src);
		rte_prefetch0(op_ops[i + 6]->sym->m_src);
		rte_prefetch0(op_ops[i + 7]->sym->m_src);
	}

	for (; i < nb_deq_ops; i++) {
		mbuf0 = op_ops[i]->sym->m_src;
		mbuf0->userdata = op_ops[i];
		rte_reorder_insert(reorder_buff, mbuf0);
	}
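
	/*
	 * Drain in-order mbufs and recover the op pointer stashed in
	 * userdata above; a plain cast suffices since userdata holds
	 * the op itself.
	 */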
	nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
			nb_ops);

	for (i = 0; i < nb_drained_mbufs && i < 4; i++)
		rte_prefetch0(reorder_mbufs[i]);

	for (i = 0; (i < (nb_drained_mbufs - 8)) && (nb_drained_mbufs > 8);
			i += 4) {
		ops[i] = (struct rte_crypto_op *)reorder_mbufs[i]->userdata;
		ops[i + 1] = (struct rte_crypto_op *)
				reorder_mbufs[i + 1]->userdata;
		ops[i + 2] = (struct rte_crypto_op *)
				reorder_mbufs[i + 2]->userdata;
		ops[i + 3] = (struct rte_crypto_op *)
				reorder_mbufs[i + 3]->userdata;

		reorder_mbufs[i]->userdata = NULL;
		reorder_mbufs[i + 1]->userdata = NULL;
		reorder_mbufs[i + 2]->userdata = NULL;
		reorder_mbufs[i + 3]->userdata = NULL;

		rte_prefetch0(reorder_mbufs[i + 4]);
		rte_prefetch0(reorder_mbufs[i + 5]);
		rte_prefetch0(reorder_mbufs[i + 6]);
		rte_prefetch0(reorder_mbufs[i + 7]);
	}

	for (; i < nb_drained_mbufs; i++) {
		ops[i] = (struct rte_crypto_op *)
				reorder_mbufs[i]->userdata;
		reorder_mbufs[i]->userdata = NULL;
	}

	return nb_drained_mbufs;
}
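
/* Round-robin keeps no extra per-slave state, so attach/detach are no-ops. */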
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}
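
/*
 * Install the burst handlers (ordered or plain, depending on whether
 * reordering is enabled) and mirror the scheduler's slave list into
 * every queue pair's private round-robin context.
 */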
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct rr_scheduler_qp_ctx *rr_qp_ctx =
				qp_ctx->private_qp_ctx;
		uint32_t j;

		memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
				sizeof(struct scheduler_slave));
		for (j = 0; j < sched_ctx->nb_slaves; j++) {
			rr_qp_ctx->slaves[j].dev_id =
					sched_ctx->slaves[j].dev_id;
			rr_qp_ctx->slaves[j].qp_id = i;
		}

		rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

		rr_qp_ctx->last_enq_slave_idx = 0;
		rr_qp_ctx->last_deq_slave_idx = 0;
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
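
/* Allocate the queue pair's private round-robin context on the local socket. */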
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct rr_scheduler_qp_ctx *rr_qp_ctx;

	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
			rte_socket_id());
	if (!rr_qp_ctx) {
		CS_LOG_ERR("failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

	return 0;
}
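
/* No device-wide private context is needed for round-robin. */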
static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
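
/* Entry points the scheduler framework invokes for this mode. */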
struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx
};

struct rte_cryptodev_scheduler scheduler = {
		.name = "roundrobin-scheduler",
		.description = "scheduler which will round robin bursts across "
				"slave crypto devices",
		.mode = CDEV_SCHED_MODE_ROUNDROBIN,
		.ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;