/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <cryptodev_pmd.h>

#include "null_crypto_pmd_private.h"

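/*
 * Capabilities of the NULL crypto PMD: pass-through AUTH and CIPHER
 * algorithms with zero-length keys, digests and IVs.
 */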
static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] = {
	{	/* NULL (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = { 0 }
			}, },
		}, },
	},
	{	/* NULL (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = { 0 }
			}, },
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

/** Configure device */
static int
null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
null_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
null_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
null_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Get device statistics */
static void
null_crypto_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

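	/* Sum the per-queue-pair counters into the device-wide statistics */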
	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
null_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
null_crypto_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct null_crypto_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
		/* No limit on the number of sessions */
		dev_info->sym.max_nb_sessions = 0;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = null_crypto_pmd_capabilities;
	}
}

/** Release queue pair */
static int
null_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	if (dev->data->queue_pairs[qp_id] != NULL) {
		struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		rte_ring_free(qp->processed_pkts);

		rte_free(dev->data->queue_pairs[qp_id]);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Set a unique name for the queue pair based on the dev_id and qp_id */
static int
null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct null_crypto_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"null_crypto_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring on which to place processed packets */
static struct rte_ring *
null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

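	/*
	 * Reuse an existing ring with this name if it is large enough;
	 * otherwise fail. If no ring with this name exists yet, create a
	 * new single-producer/single-consumer ring.
	 */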
	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			NULL_LOG(INFO,
					"Reusing existing ring %s for "
					"processed packets", qp->name);
			return r;
		}

		NULL_LOG(INFO,
				"Unable to reuse existing ring %s for "
				"processed packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

/** Setup a queue pair */
static int
null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct null_crypto_private *internals = dev->data->dev_private;
	struct null_crypto_qp *qp;
	int retval;

	if (qp_id >= internals->max_nb_qpairs) {
		NULL_LOG(ERR, "Invalid qp_id %u, greater than maximum "
				"number of queue pairs supported (%u).",
				qp_id, internals->max_nb_qpairs);
		return (-EINVAL);
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		null_crypto_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		NULL_LOG(ERR, "Failed to allocate queue pair memory");
		return (-ENOMEM);
	}

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

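	/*
	 * The queue pair name must be unique: it is also used to name the
	 * ring of processed packets created below.
	 */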
	retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
	if (retval) {
		NULL_LOG(ERR, "Failed to create unique name for null "
				"crypto device");

		goto qp_setup_cleanup;
	}

	qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL) {
		NULL_LOG(ERR, "Failed to create processed packets ring for "
				"null crypto device");
		goto qp_setup_cleanup;
	}

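	/* Keep references to the session mempools from the queue pair configuration */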
	qp->sess_mp = qp_conf->mp_session;
	qp->sess_mp_priv = qp_conf->mp_session_private;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	rte_free(qp);

	return -1;
}

/** Returns the size of the NULL crypto session structure */
static unsigned
null_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct null_crypto_session);
}

/** Configure a null crypto session from a crypto xform chain */
static int
null_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mp)
{
	void *sess_private_data;
	int ret;

	if (unlikely(sess == NULL)) {
		NULL_LOG(ERR, "invalid session struct");
		return -EINVAL;
	}

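	/* Get an object from the session mempool for the private session data */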
	if (rte_mempool_get(mp, &sess_private_data)) {
		NULL_LOG(ERR,
				"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = null_crypto_set_session_parameters(sess_private_data, xform);
	if (ret != 0) {
		NULL_LOG(ERR, "failed to configure session parameters");

		/* Return the private data object to the mempool */
		rte_mempool_put(mp, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
		sess_private_data);

	return 0;
}

/** Clear the session memory so it doesn't leave key material behind */
static void
null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);

	/* Zero out the whole structure */
	if (sess_priv) {
		memset(sess_priv, 0, sizeof(struct null_crypto_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

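/** Cryptodev operations implemented by the NULL crypto PMD */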
static struct rte_cryptodev_ops pmd_ops = {
		.dev_configure		= null_crypto_pmd_config,
		.dev_start		= null_crypto_pmd_start,
		.dev_stop		= null_crypto_pmd_stop,
		.dev_close		= null_crypto_pmd_close,

		.stats_get		= null_crypto_pmd_stats_get,
		.stats_reset		= null_crypto_pmd_stats_reset,

		.dev_infos_get		= null_crypto_pmd_info_get,

		.queue_pair_setup	= null_crypto_pmd_qp_setup,
		.queue_pair_release	= null_crypto_pmd_qp_release,

		.sym_session_get_size	= null_crypto_pmd_sym_session_get_size,
		.sym_session_configure	= null_crypto_pmd_sym_session_configure,
		.sym_session_clear	= null_crypto_pmd_sym_session_clear
};

struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;