crypto/ccp: support AES
[dpdk.git] / drivers / crypto / ccp / ccp_pmd_ops.c
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  */
4
5 #include <string.h>
6
7 #include <rte_common.h>
8 #include <rte_cryptodev_pmd.h>
9 #include <rte_malloc.h>
10
11 #include "ccp_pmd_private.h"
12 #include "ccp_dev.h"
13 #include "ccp_crypto.h"
14
/*
 * Capabilities advertised to the cryptodev framework.
 *
 * The CCP engine supports AES in ECB, CBC and CTR modes with
 * 128/192/256-bit keys (key_size 16..32 bytes, increment 8).
 * ECB takes no IV; CBC and CTR require a 16-byte IV.
 */
static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
	{	/* AES ECB */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
				.block_size = 16,
				.key_size = {
				   .min = 16,
				   .max = 32,
				   .increment = 8
				},
				.iv_size = {
				   .min = 0,	/* ECB mode uses no IV */
				   .max = 0,
				   .increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,	/* one AES block */
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,	/* one AES block */
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
78
79 static int
80 ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
81                struct rte_cryptodev_config *config __rte_unused)
82 {
83         return 0;
84 }
85
/*
 * Device start hook: delegates to ccp_dev_start() (ccp_dev.c), which
 * performs the actual hardware/queue bring-up for this cryptodev.
 */
static int
ccp_pmd_start(struct rte_cryptodev *dev)
{
	return ccp_dev_start(dev);
}
91
92 static void
93 ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
94 {
95
96 }
97
98 static int
99 ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
100 {
101         return 0;
102 }
103
104 static void
105 ccp_pmd_stats_get(struct rte_cryptodev *dev,
106                   struct rte_cryptodev_stats *stats)
107 {
108         int qp_id;
109
110         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
111                 struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
112
113                 stats->enqueued_count += qp->qp_stats.enqueued_count;
114                 stats->dequeued_count += qp->qp_stats.dequeued_count;
115
116                 stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
117                 stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
118         }
119
120 }
121
122 static void
123 ccp_pmd_stats_reset(struct rte_cryptodev *dev)
124 {
125         int qp_id;
126
127         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
128                 struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
129
130                 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
131         }
132 }
133
134 static void
135 ccp_pmd_info_get(struct rte_cryptodev *dev,
136                  struct rte_cryptodev_info *dev_info)
137 {
138         struct ccp_private *internals = dev->data->dev_private;
139
140         if (dev_info != NULL) {
141                 dev_info->driver_id = dev->driver_id;
142                 dev_info->feature_flags = dev->feature_flags;
143                 dev_info->capabilities = ccp_pmd_capabilities;
144                 dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
145                 dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
146         }
147 }
148
149 static int
150 ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
151 {
152         struct ccp_qp *qp;
153
154         if (dev->data->queue_pairs[qp_id] != NULL) {
155                 qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
156                 rte_ring_free(qp->processed_pkts);
157                 rte_mempool_free(qp->batch_mp);
158                 rte_free(qp);
159                 dev->data->queue_pairs[qp_id] = NULL;
160         }
161         return 0;
162 }
163
164 static int
165 ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
166                 struct ccp_qp *qp)
167 {
168         unsigned int n = snprintf(qp->name, sizeof(qp->name),
169                         "ccp_pmd_%u_qp_%u",
170                         dev->data->dev_id, qp->id);
171
172         if (n > sizeof(qp->name))
173                 return -1;
174
175         return 0;
176 }
177
178 static struct rte_ring *
179 ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
180                                   unsigned int ring_size, int socket_id)
181 {
182         struct rte_ring *r;
183
184         r = rte_ring_lookup(qp->name);
185         if (r) {
186                 if (r->size >= ring_size) {
187                         CCP_LOG_INFO(
188                                 "Reusing ring %s for processed packets",
189                                 qp->name);
190                         return r;
191                 }
192                 CCP_LOG_INFO(
193                         "Unable to reuse ring %s for processed packets",
194                          qp->name);
195                 return NULL;
196         }
197
198         return rte_ring_create(qp->name, ring_size, socket_id,
199                         RING_F_SP_ENQ | RING_F_SC_DEQ);
200 }
201
202 static int
203 ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
204                  const struct rte_cryptodev_qp_conf *qp_conf,
205                  int socket_id, struct rte_mempool *session_pool)
206 {
207         struct ccp_private *internals = dev->data->dev_private;
208         struct ccp_qp *qp;
209         int retval = 0;
210
211         if (qp_id >= internals->max_nb_qpairs) {
212                 CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
213                             qp_id, internals->max_nb_qpairs);
214                 return (-EINVAL);
215         }
216
217         /* Free memory prior to re-allocation if needed. */
218         if (dev->data->queue_pairs[qp_id] != NULL)
219                 ccp_pmd_qp_release(dev, qp_id);
220
221         /* Allocate the queue pair data structure. */
222         qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
223                                         RTE_CACHE_LINE_SIZE, socket_id);
224         if (qp == NULL) {
225                 CCP_LOG_ERR("Failed to allocate queue pair memory");
226                 return (-ENOMEM);
227         }
228
229         qp->dev = dev;
230         qp->id = qp_id;
231         dev->data->queue_pairs[qp_id] = qp;
232
233         retval = ccp_pmd_qp_set_unique_name(dev, qp);
234         if (retval) {
235                 CCP_LOG_ERR("Failed to create unique name for ccp qp");
236                 goto qp_setup_cleanup;
237         }
238
239         qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
240                         qp_conf->nb_descriptors, socket_id);
241         if (qp->processed_pkts == NULL) {
242                 CCP_LOG_ERR("Failed to create batch info ring");
243                 goto qp_setup_cleanup;
244         }
245
246         qp->sess_mp = session_pool;
247
248         /* mempool for batch info */
249         qp->batch_mp = rte_mempool_create(
250                                 qp->name,
251                                 qp_conf->nb_descriptors,
252                                 sizeof(struct ccp_batch_info),
253                                 RTE_CACHE_LINE_SIZE,
254                                 0, NULL, NULL, NULL, NULL,
255                                 SOCKET_ID_ANY, 0);
256         if (qp->batch_mp == NULL)
257                 goto qp_setup_cleanup;
258         memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
259         return 0;
260
261 qp_setup_cleanup:
262         dev->data->queue_pairs[qp_id] = NULL;
263         if (qp)
264                 rte_free(qp);
265         return -1;
266 }
267
268 static int
269 ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
270                  uint16_t queue_pair_id __rte_unused)
271 {
272         return -ENOTSUP;
273 }
274
275 static int
276 ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
277                 uint16_t queue_pair_id __rte_unused)
278 {
279         return -ENOTSUP;
280 }
281
/* Return the number of queue pairs currently configured on @dev. */
static uint32_t
ccp_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
287
/*
 * Return the size of the PMD's private session structure, so the
 * framework can size the session mempool elements accordingly.
 */
static unsigned
ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct ccp_session);
}
293
294 static int
295 ccp_pmd_session_configure(struct rte_cryptodev *dev,
296                           struct rte_crypto_sym_xform *xform,
297                           struct rte_cryptodev_sym_session *sess,
298                           struct rte_mempool *mempool)
299 {
300         int ret;
301         void *sess_private_data;
302
303         if (unlikely(sess == NULL || xform == NULL)) {
304                 CCP_LOG_ERR("Invalid session struct or xform");
305                 return -ENOMEM;
306         }
307
308         if (rte_mempool_get(mempool, &sess_private_data)) {
309                 CCP_LOG_ERR("Couldn't get object from session mempool");
310                 return -ENOMEM;
311         }
312         ret = ccp_set_session_parameters(sess_private_data, xform);
313         if (ret != 0) {
314                 CCP_LOG_ERR("failed configure session parameters");
315
316                 /* Return session to mempool */
317                 rte_mempool_put(mempool, sess_private_data);
318                 return ret;
319         }
320         set_session_private_data(sess, dev->driver_id,
321                                  sess_private_data);
322
323         return 0;
324 }
325
326 static void
327 ccp_pmd_session_clear(struct rte_cryptodev *dev,
328                       struct rte_cryptodev_sym_session *sess)
329 {
330         uint8_t index = dev->driver_id;
331         void *sess_priv = get_session_private_data(sess, index);
332
333         if (sess_priv) {
334                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
335
336                 rte_mempool_put(sess_mp, sess_priv);
337                 memset(sess_priv, 0, sizeof(struct ccp_session));
338                 set_session_private_data(sess, index, NULL);
339         }
340 }
341
/*
 * Cryptodev operations table for the CCP PMD. Queue-pair start/stop
 * are stubs returning -ENOTSUP; everything else maps to the handlers
 * defined above.
 */
struct rte_cryptodev_ops ccp_ops = {
		.dev_configure		= ccp_pmd_config,
		.dev_start		= ccp_pmd_start,
		.dev_stop		= ccp_pmd_stop,
		.dev_close		= ccp_pmd_close,

		.stats_get		= ccp_pmd_stats_get,
		.stats_reset		= ccp_pmd_stats_reset,

		.dev_infos_get		= ccp_pmd_info_get,

		.queue_pair_setup	= ccp_pmd_qp_setup,
		.queue_pair_release	= ccp_pmd_qp_release,
		.queue_pair_start	= ccp_pmd_qp_start,
		.queue_pair_stop	= ccp_pmd_qp_stop,
		.queue_pair_count	= ccp_pmd_qp_count,

		.session_get_size	= ccp_pmd_session_get_size,
		.session_configure	= ccp_pmd_session_configure,
		.session_clear		= ccp_pmd_session_clear,
};
363
364 struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;