drivers/crypto/scheduler/scheduler_failover.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define PRIMARY_SLAVE_IDX	0
#define SECONDARY_SLAVE_IDX	1
#define NB_FAILOVER_SLAVES	2
#define SLAVE_SWITCH_MASK	(0x01)

struct fo_scheduler_qp_ctx {
	struct scheduler_slave primary_slave;
	struct scheduler_slave secondary_slave;

	/* Index of the slave to poll first on the next dequeue;
	 * alternates between primary (0) and secondary (1).
	 */
	uint8_t deq_idx;
};

static __rte_always_inline uint16_t
failover_slave_enqueue(struct scheduler_slave *slave,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t i, processed_ops;

	/* Prefetch the first few sessions to hide lookup latency. */
	for (i = 0; i < nb_ops && i < 4; i++)
		rte_prefetch0(ops[i]->sym->session);

	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);
	slave->nb_inflight_cops += processed_ops;

	return processed_ops;
}

static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint16_t enqueued_ops;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Try the primary slave first; fail over to the secondary for
	 * whatever the primary could not accept.
	 */
	enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
			ops, nb_ops);

	if (enqueued_ops < nb_ops)
		enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
				&ops[enqueued_ops],
				nb_ops - enqueued_ops);

	return enqueued_ops;
}
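
/*
 * Caller-side note on schedule_enqueue() above: even with failover, a burst
 * may be only partially accepted when both slaves' queue pairs are full, so
 * callers must honour the return count. A minimal retry sketch, assuming
 * scheduler_dev_id and qp_id identify the scheduler's device and queue pair:
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_ops)
 *		sent += rte_cryptodev_enqueue_burst(scheduler_dev_id, qp_id,
 *				&ops[sent], nb_ops - sent);
 */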

static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
			&qp_ctx->primary_slave, &qp_ctx->secondary_slave};
	struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
	uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;

	if (slave->nb_inflight_cops) {
		nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);
		slave->nb_inflight_cops -= nb_deq_ops;
	}

	/* Flip the starting slave for the next call. */
	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;

	if (nb_deq_ops == nb_ops)
		return nb_deq_ops;

	/* Fill the remainder of the burst from the other slave. */
	slave = slaves[qp_ctx->deq_idx];

	if (slave->nb_inflight_cops) {
		nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
		slave->nb_inflight_cops -= nb_deq_ops2;
	}

	return nb_deq_ops + nb_deq_ops2;
}
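
/*
 * Illustrative trace of schedule_dequeue() above (numbers are assumptions):
 * with 10 ops in flight on each slave and nb_ops = 16, one call dequeues 10
 * from the slave selected by deq_idx, flips deq_idx, then takes up to 6 more
 * from the other slave. The next call starts from the other slave, so
 * dequeue bandwidth is shared evenly and neither slave can starve the other.
 */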

static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}

static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->nb_slaves < 2) {
		CR_SCHED_LOG(ERR, "Number of slaves shall not be less than 2");
		return -EINVAL;
	}

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = schedule_enqueue_ordering;
		dev->dequeue_burst = schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = schedule_enqueue;
		dev->dequeue_burst = schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)
				dev->data->queue_pairs[i])->private_qp_ctx;

		rte_memcpy(&qp_ctx->primary_slave,
				&sched_ctx->slaves[PRIMARY_SLAVE_IDX],
				sizeof(struct scheduler_slave));
		rte_memcpy(&qp_ctx->secondary_slave,
				&sched_ctx->slaves[SECONDARY_SLAVE_IDX],
				sizeof(struct scheduler_slave));
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct fo_scheduler_qp_ctx *fo_qp_ctx;

	fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
			rte_socket_id());
	if (!fo_qp_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};

static struct rte_cryptodev_scheduler fo_scheduler = {
		.name = "failover-scheduler",
		.description = "scheduler which enqueues to the primary slave "
				"first, and enqueues to the secondary slave "
				"only when the primary fails to accept the "
				"full burst",
		.mode = CDEV_SCHED_MODE_FAILOVER,
		.ops = &scheduler_fo_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_failover = &fo_scheduler;
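
/*
 * Usage sketch (illustrative only, not part of this driver): one way an
 * application might stand up the failover scheduler over two slave PMDs.
 * The slave names below are assumptions for the example; "fail-over" is the
 * scheduler PMD's devargs name for this mode.
 */
#if 0	/* example only; not compiled into the driver */
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_scheduler.h>

static int
setup_failover_scheduler(void)
{
	/* Create the scheduler on top of two already-probed slaves. */
	int ret = rte_vdev_init("crypto_scheduler",
			"slave=crypto_aesni_mb0,slave=crypto_aesni_mb1,"
			"mode=fail-over");
	if (ret < 0)
		return ret;

	/* Alternatively, switch an existing scheduler to failover mode. */
	int sched_id = rte_cryptodev_get_dev_id("crypto_scheduler");
	if (sched_id < 0)
		return sched_id;

	return rte_cryptodev_scheduler_mode_set((uint8_t)sched_id,
			CDEV_SCHED_MODE_FAILOVER);
}
#endif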