drivers/crypto/scheduler/scheduler_roundrobin.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

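/*
 * Per queue pair context for the round-robin mode: a private copy of the
 * worker list plus two cursors. last_enq_worker_idx selects the worker
 * that receives the next enqueued burst; last_deq_worker_idx selects the
 * worker polled first on the next dequeue.
 */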
struct rr_scheduler_qp_ctx {
        struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
        uint32_t nb_workers;

        uint32_t last_enq_worker_idx;
        uint32_t last_deq_worker_idx;
};

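/*
 * Enqueue a whole burst to a single worker, then advance the round-robin
 * cursor so the next burst goes to the following worker. Bursts are never
 * split across workers.
 */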
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rr_scheduler_qp_ctx *rr_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        uint32_t worker_idx = rr_qp_ctx->last_enq_worker_idx;
        struct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx];
        uint16_t i, processed_ops;

        if (unlikely(nb_ops == 0))
                return 0;

        for (i = 0; i < nb_ops && i < 4; i++)
                rte_prefetch0(ops[i]->sym->session);

        processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
                        worker->qp_id, ops, nb_ops);

        worker->nb_inflight_cops += processed_ops;

        rr_qp_ctx->last_enq_worker_idx += 1;
        rr_qp_ctx->last_enq_worker_idx %= rr_qp_ctx->nb_workers;

        return processed_ops;
}

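/*
 * Ordering-aware enqueue: cap the burst at what the reorder ring can
 * currently accept, enqueue it round-robin, and record the accepted ops
 * in the ring so they can later be drained in their original order.
 */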
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;
        uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
                        nb_ops);
        uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
                        nb_ops_to_enq);

        scheduler_order_insert(order_ring, ops, nb_ops_enqd);

        return nb_ops_enqd;
}

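/*
 * Dequeue from the worker pointed to by the round-robin cursor. If that
 * worker has no inflight ops, scan forward for one that does; if the scan
 * wraps around to the starting index, nothing is inflight anywhere.
 */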
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rr_scheduler_qp_ctx *rr_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct scheduler_worker *worker;
        uint32_t last_worker_idx = rr_qp_ctx->last_deq_worker_idx;
        uint16_t nb_deq_ops;

        if (unlikely(rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
                        == 0)) {
                do {
                        last_worker_idx += 1;

                        if (unlikely(last_worker_idx >= rr_qp_ctx->nb_workers))
                                last_worker_idx = 0;
                        /* wrapped back to the start: no worker has inflight cops */
                        if (last_worker_idx == rr_qp_ctx->last_deq_worker_idx)
                                return 0;
                } while (rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
                                == 0);
        }

        worker = &rr_qp_ctx->workers[last_worker_idx];

        nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
                        worker->qp_id, ops, nb_ops);

        last_worker_idx += 1;
        last_worker_idx %= rr_qp_ctx->nb_workers;

        rr_qp_ctx->last_deq_worker_idx = last_worker_idx;

        worker->nb_inflight_cops -= nb_deq_ops;

        return nb_deq_ops;
}

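/*
 * Ordering-aware dequeue: pull finished ops from the workers, then drain
 * the reorder ring so ops are returned in the order they were enqueued.
 */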
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;

        schedule_dequeue(qp, ops, nb_ops);

        return scheduler_order_drain(order_ring, ops, nb_ops);
}

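/* Round-robin keeps no per-worker state, so attach/detach are no-ops. */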
static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t worker_id)
{
        return 0;
}

static int
worker_detach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t worker_id)
{
        return 0;
}

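/*
 * Install the burst functions that match the reordering setting, and give
 * every queue pair its own snapshot of the current worker list with both
 * round-robin cursors reset to the first worker.
 */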
static int
scheduler_start(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        uint16_t i;

        if (sched_ctx->reordering_enabled) {
                dev->enqueue_burst = &schedule_enqueue_ordering;
                dev->dequeue_burst = &schedule_dequeue_ordering;
        } else {
                dev->enqueue_burst = &schedule_enqueue;
                dev->dequeue_burst = &schedule_dequeue;
        }

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
                struct rr_scheduler_qp_ctx *rr_qp_ctx =
                                qp_ctx->private_qp_ctx;
                uint32_t j;

                memset(rr_qp_ctx->workers, 0,
                                RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
                                sizeof(struct scheduler_worker));
                for (j = 0; j < sched_ctx->nb_workers; j++) {
                        rr_qp_ctx->workers[j].dev_id =
                                        sched_ctx->workers[j].dev_id;
                        rr_qp_ctx->workers[j].qp_id = i;
                }

                rr_qp_ctx->nb_workers = sched_ctx->nb_workers;

                rr_qp_ctx->last_enq_worker_idx = 0;
                rr_qp_ctx->last_deq_worker_idx = 0;
        }

        return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

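/* Allocate the private round-robin context for one queue pair. */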
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
        struct rr_scheduler_qp_ctx *rr_qp_ctx;

        rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
                        rte_socket_id());
        if (!rr_qp_ctx) {
                CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
                return -ENOMEM;
        }

        qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

        return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

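/*
 * Entries are positional and follow the member order of
 * struct rte_cryptodev_scheduler_ops.
 */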
static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
        worker_attach,
        worker_detach,
        scheduler_start,
        scheduler_stop,
        scheduler_config_qp,
        scheduler_create_private_ctx,
        NULL,   /* option_set */
        NULL    /* option_get */
};

static struct rte_cryptodev_scheduler scheduler = {
        .name = "roundrobin-scheduler",
        .description = "scheduler which will round robin bursts across "
                        "worker crypto devices",
        .mode = CDEV_SCHED_MODE_ROUNDROBIN,
        .ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin = &scheduler;
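
/*
 * Usage sketch (illustrative only, not part of this driver): after the
 * scheduler vdev is created and workers are attached (via vdev arguments
 * or rte_cryptodev_scheduler_worker_attach()), an application selects
 * this mode through the scheduler API. The vdev name "crypto_scheduler"
 * below is an assumption for the example; the actual name depends on how
 * the device was created.
 *
 *	int sched_id = rte_cryptodev_get_dev_id("crypto_scheduler");
 *
 *	if (sched_id >= 0)
 *		rte_cryptodev_scheduler_mode_set((uint8_t)sched_id,
 *				CDEV_SCHED_MODE_ROUNDROBIN);
 */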