/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define DEF_PKT_SIZE_THRESHOLD		(0xffffff80)
#define SLAVE_IDX_SWITCH_MASK		(0x01)
#define PRIMARY_SLAVE_IDX		0
#define SECONDARY_SLAVE_IDX		1
#define NB_PKT_SIZE_SLAVES		2

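/*
 * The threshold is kept as a bit mask rather than a byte count: for a
 * power-of-two threshold T, scheduler_option_set() below stores ~(T - 1),
 * so (job_len & mask) is non-zero exactly when job_len >= T. The default
 * mask 0xffffff80 equals ~(128 - 1), i.e. a 128-byte threshold.
 */
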
/** pkt size based scheduler context */
struct psd_scheduler_ctx {
	uint32_t threshold;
};

/** pkt size based scheduler queue pair context */
struct psd_scheduler_qp_ctx {
	struct scheduler_slave primary_slave;
	struct scheduler_slave secondary_slave;
	uint32_t threshold;
	uint8_t deq_idx;
} __rte_cache_aligned;

/** scheduling operation variables' wrapping */
struct psd_schedule_op {
	uint8_t slave_idx;
	uint16_t pos;
};

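/*
 * Distribute a burst of crypto ops between the two slaves by job size:
 * ops whose length reaches the threshold are staged for the primary
 * slave, smaller ops for the secondary slave. Each slave's staged ops
 * are then enqueued in a single burst.
 */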
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct scheduler_qp_ctx *qp_ctx = qp;
	struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
	struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
	struct scheduler_session *sess;
	uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
			psd_qp_ctx->primary_slave.nb_inflight_cops,
			psd_qp_ctx->secondary_slave.nb_inflight_cops
	};
	struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
		{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
	};
	struct psd_schedule_op *p_enq_op;
	uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
	uint32_t job_len;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++) {
		rte_prefetch0(ops[i]->sym);
		rte_prefetch0(ops[i]->sym->session);
	}

	/* main loop is unrolled four ways; each step prefetches the ops
	 * four positions ahead
	 */
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		rte_prefetch0(ops[i + 4]->sym);
		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym);
		rte_prefetch0(ops[i + 7]->sym->session);

		sess = (struct scheduler_session *)
				ops[i]->sym->session->_private;

		/* job_len is initialized to the cipher data length; when
		 * that is 0, it falls back to the auth data length
		 */
		job_len = ops[i]->sym->cipher.data.length;
		job_len += (ops[i]->sym->cipher.data.length == 0) *
				ops[i]->sym->auth.data.length;
		/* decide the target op based on the job length */
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		/* stop scheduling cops before the queue is full; this
		 * prevents a failed enqueue
		 */
		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
		ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

		sess = (struct scheduler_session *)
				ops[i+1]->sym->session->_private;

		job_len = ops[i+1]->sym->cipher.data.length;
		job_len += (ops[i+1]->sym->cipher.data.length == 0) *
				ops[i+1]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
		ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

		sess = (struct scheduler_session *)
				ops[i+2]->sym->session->_private;

		job_len = ops[i+2]->sym->cipher.data.length;
		job_len += (ops[i+2]->sym->cipher.data.length == 0) *
				ops[i+2]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
		ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

		sess = (struct scheduler_session *)
				ops[i+3]->sym->session->_private;

		job_len = ops[i+3]->sym->cipher.data.length;
		job_len += (ops[i+3]->sym->cipher.data.length == 0) *
				ops[i+3]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
		ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;
	}

	/* handle the remaining tail of the burst */
	for (; i < nb_ops; i++) {
		sess = (struct scheduler_session *)
				ops[i]->sym->session->_private;

		job_len = ops[i]->sym->cipher.data.length;
		job_len += (ops[i]->sym->cipher.data.length == 0) *
				ops[i]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
		ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;
	}

	processed_ops_pri = rte_cryptodev_enqueue_burst(
			psd_qp_ctx->primary_slave.dev_id,
			psd_qp_ctx->primary_slave.qp_id,
			sched_ops[PRIMARY_SLAVE_IDX],
			enq_ops[PRIMARY_SLAVE_IDX].pos);
	/* enqueue shall not fail as the slave queue is monitored */
	RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);

	psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;

	processed_ops_sec = rte_cryptodev_enqueue_burst(
			psd_qp_ctx->secondary_slave.dev_id,
			psd_qp_ctx->secondary_slave.qp_id,
			sched_ops[SECONDARY_SLAVE_IDX],
			enq_ops[SECONDARY_SLAVE_IDX].pos);
	RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);

	psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;

	return processed_ops_pri + processed_ops_sec;
}

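/*
 * Ordering-aware enqueue: the burst is first capped to the free space in
 * the order ring (via get_max_enqueue_order_count() from
 * scheduler_pmd_private.h), and the ops that were actually enqueued are
 * recorded in the ring so dequeue can return them in submission order.
 */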
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

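/*
 * Dequeue from the two slaves in turn, starting with the one deq_idx
 * points at; deq_idx is flipped on each call so neither slave's
 * completions are starved of dequeue opportunities.
 */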
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct psd_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
			&qp_ctx->primary_slave, &qp_ctx->secondary_slave};
	struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
	uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;

	if (slave->nb_inflight_cops) {
		nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);
		slave->nb_inflight_cops -= nb_deq_ops_pri;
	}

	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;

	if (nb_deq_ops_pri == nb_ops)
		return nb_deq_ops_pri;

	slave = slaves[qp_ctx->deq_idx];

	if (slave->nb_inflight_cops) {
		nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
				slave->qp_id, &ops[nb_deq_ops_pri],
				nb_ops - nb_deq_ops_pri);
		slave->nb_inflight_cops -= nb_deq_ops_sec;

		/* if this slave is drained, switch back for the next call */
		if (!slave->nb_inflight_cops)
			qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
					SLAVE_IDX_SWITCH_MASK;
	}

	return nb_deq_ops_pri + nb_deq_ops_sec;
}

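/*
 * Ordering-aware dequeue: schedule_dequeue() pulls completed ops from the
 * slaves (updating their status), then the order ring is drained only up
 * to the first op that is still unprocessed, so the caller receives ops
 * in their original enqueue order.
 */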
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}

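/*
 * Nothing to do on slave attach/detach: the first two slaves are read
 * from the scheduler context in scheduler_start().
 */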
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
	uint16_t i;

	/* the packet size based scheduler needs at least two slaves */
	if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
		CS_LOG_ERR("not enough slaves to start");
		return -1;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct psd_scheduler_qp_ctx *ps_qp_ctx =
				qp_ctx->private_qp_ctx;

		ps_qp_ctx->primary_slave.dev_id =
				sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
		ps_qp_ctx->primary_slave.qp_id = i;
		ps_qp_ctx->primary_slave.nb_inflight_cops = 0;

		ps_qp_ctx->secondary_slave.dev_id =
				sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
		ps_qp_ctx->secondary_slave.qp_id = i;
		ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;

		ps_qp_ctx->threshold = psd_ctx->threshold;
	}

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	return 0;
}

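/*
 * Refuse to stop while either slave still has crypto ops in flight on any
 * queue pair, since those ops could no longer be retrieved through this
 * scheduler once it is stopped.
 */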
static int
scheduler_stop(struct rte_cryptodev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;

		if (ps_qp_ctx->primary_slave.nb_inflight_cops +
				ps_qp_ctx->secondary_slave.nb_inflight_cops) {
			CS_LOG_ERR("Some crypto ops left in slave queue");
			return -1;
		}
	}

	return 0;
}

static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct psd_scheduler_qp_ctx *ps_qp_ctx;

	ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
			rte_socket_id());
	if (!ps_qp_ctx) {
		CS_LOG_ERR("failed allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct psd_scheduler_ctx *psd_ctx;

	if (sched_ctx->private_ctx)
		rte_free(sched_ctx->private_ctx);

	psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
			rte_socket_id());
	if (!psd_ctx) {
		CS_LOG_ERR("failed allocate memory");
		return -ENOMEM;
	}

	psd_ctx->threshold = DEF_PKT_SIZE_THRESHOLD;

	sched_ctx->private_ctx = (void *)psd_ctx;

	return 0;
}

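/*
 * Convert a power-of-two byte threshold into the mask used on the
 * datapath. For example, a threshold of 1024 is stored as
 * ~(1024 - 1) = 0xfffffc00, so any job of 1024 bytes or more tests
 * non-zero against the mask.
 */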
static int
scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
		void *option)
{
	struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
			dev->data->dev_private)->private_ctx;
	uint32_t threshold;

	if ((enum rte_cryptodev_schedule_option_type)option_type !=
			CDEV_SCHED_OPTION_THRESHOLD) {
		CS_LOG_ERR("Option not supported");
		return -EINVAL;
	}

	threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
			option)->threshold;
	if (!rte_is_power_of_2(threshold)) {
		CS_LOG_ERR("Threshold is not power of 2");
		return -EINVAL;
	}

	psd_ctx->threshold = ~(threshold - 1);

	return 0;
}

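/*
 * Recover the byte threshold from the stored mask: for a mask of
 * ~(T - 1), (~mask) + 1 == T, e.g. (~0xffffff80) + 1 == 0x80 == 128.
 */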
static int
scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
		void *option)
{
	struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
			dev->data->dev_private)->private_ctx;
	struct rte_cryptodev_scheduler_threshold_option *threshold_option;

	if ((enum rte_cryptodev_schedule_option_type)option_type !=
			CDEV_SCHED_OPTION_THRESHOLD) {
		CS_LOG_ERR("Option not supported");
		return -EINVAL;
	}

	threshold_option = option;
	threshold_option->threshold = (~psd_ctx->threshold) + 1;

	return 0;
}

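/*
 * Illustrative usage (a sketch, not part of this driver): an application
 * would set the threshold through the scheduler API rather than touching
 * this context directly, along the lines of
 *
 *	struct rte_cryptodev_scheduler_threshold_option opt = {
 *		.threshold = 1024
 *	};
 *	ret = rte_cryptodev_scheduler_option_set(scheduler_id,
 *			CDEV_SCHED_OPTION_THRESHOLD, (void *)&opt);
 *
 * where scheduler_id is assumed to be the crypto device id of a
 * configured scheduler PMD instance.
 */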
struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	scheduler_option_set,
	scheduler_option_get
};

struct rte_cryptodev_scheduler psd_scheduler = {
		.name = "packet-size-based-scheduler",
		.description = "scheduler which will distribute crypto op "
				"burst based on the packet size",
		.mode = CDEV_SCHED_MODE_PKT_SIZE_DISTR,
		.ops = &scheduler_ps_ops
};

struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;