crypto/ipsec_mb: move kasumi PMD
[dpdk.git] drivers/crypto/ipsec_mb/ipsec_mb_private.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#ifndef _IPSEC_MB_PRIVATE_H_
#define _IPSEC_MB_PRIVATE_H_

#include <intel-ipsec-mb.h>
#include <cryptodev_pmd.h>
#include <rte_bus_vdev.h>

#if defined(RTE_LIB_SECURITY)
#define IPSEC_MB_DOCSIS_SEC_ENABLED 1
#include <rte_security.h>
#include <rte_security_driver.h>
#endif

/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64

/* Maximum length for memzone name */
#define IPSEC_MB_MAX_MZ_NAME 32

enum ipsec_mb_vector_mode {
        IPSEC_MB_NOT_SUPPORTED = 0,
        IPSEC_MB_SSE,
        IPSEC_MB_AVX,
        IPSEC_MB_AVX2,
        IPSEC_MB_AVX512
};

extern enum ipsec_mb_vector_mode vector_mode;

/** IMB_MGR instances, one per thread */
extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< IPSEC Multi buffer aesni_mb PMD device name */

#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< IPSEC Multi buffer aesni_gcm PMD device name */

#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
/**< IPSEC Multi buffer kasumi PMD device name */

/** Driver log type, common to all PMDs */
extern int ipsec_mb_logtype_driver;
#define IPSEC_MB_LOG(level, fmt, ...)                                         \
        rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
                "%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
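
/*
 * Illustrative usage (not part of the original header): "qp_id" below is a
 * hypothetical variable. The macro prefixes the message with the calling
 * function and line number and already appends a newline, so format strings
 * should not end in "\n".
 *
 *   IPSEC_MB_LOG(ERR, "Failed to set up queue pair %u", qp_id);
 */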

/** All supported device types */
enum ipsec_mb_pmd_types {
        IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
        IPSEC_MB_PMD_TYPE_AESNI_GCM,
        IPSEC_MB_PMD_TYPE_KASUMI,
        IPSEC_MB_N_PMD_TYPES
};

/** Crypto operations */
enum ipsec_mb_operation {
        IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
        IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
        IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
        IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
        IPSEC_MB_OP_ENCRYPT_ONLY,
        IPSEC_MB_OP_DECRYPT_ONLY,
        IPSEC_MB_OP_HASH_GEN_ONLY,
        IPSEC_MB_OP_HASH_VERIFY_ONLY,
        IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
        IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
        IPSEC_MB_OP_NOT_SUPPORTED
};

extern uint8_t pmd_driver_id_aesni_mb;
extern uint8_t pmd_driver_id_aesni_gcm;
extern uint8_t pmd_driver_id_kasumi;

/** Helper function. Gets driver ID based on PMD type */
static __rte_always_inline uint8_t
ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
{
        switch (pmd_type) {
        case IPSEC_MB_PMD_TYPE_AESNI_MB:
                return pmd_driver_id_aesni_mb;
        case IPSEC_MB_PMD_TYPE_AESNI_GCM:
                return pmd_driver_id_aesni_gcm;
        case IPSEC_MB_PMD_TYPE_KASUMI:
                return pmd_driver_id_kasumi;
        default:
                break;
        }
        return UINT8_MAX;
}
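
/*
 * Illustrative usage (not part of the original header): the driver ID is the
 * key used by the generic session helpers to look up per-driver session data;
 * "sess" below is a hypothetical rte_cryptodev_sym_session pointer.
 *
 *   uint8_t id = ipsec_mb_get_driver_id(IPSEC_MB_PMD_TYPE_KASUMI);
 *   void *priv = get_sym_session_private_data(sess, id);
 */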

/** Common private data structure for each PMD */
struct ipsec_mb_dev_private {
        enum ipsec_mb_pmd_types pmd_type;
        /**< PMD type */
        uint32_t max_nb_queue_pairs;
        /**< Max number of queue pairs supported by device */
        __extension__ uint8_t priv[0];
};
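
/*
 * Illustrative sketch (not part of the original header): priv[] is a
 * zero-length trailing member, so device creation reserves extra bytes
 * (internals_priv_size) after the common structure and each PMD overlays its
 * own private type there; "aesni_mb_private" below is a hypothetical type.
 *
 *   struct ipsec_mb_dev_private *dp = dev->data->dev_private;
 *   struct aesni_mb_private *p = (struct aesni_mb_private *)dp->priv;
 */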

/** IPSEC Multi buffer queue pair data common to all PMDs */
struct ipsec_mb_qp {
        uint16_t id;
        /**< Queue Pair Identifier */
        char name[RTE_CRYPTODEV_NAME_MAX_LEN];
        struct rte_ring *ingress_queue;
        /**< Ring for placing operations ready for processing */
        struct rte_mempool *sess_mp;
        /**< Session Mempool */
        struct rte_mempool *sess_mp_priv;
        /**< Session Private Data Mempool */
        struct rte_cryptodev_stats stats;
        /**< Queue pair statistics */
        enum ipsec_mb_pmd_types pmd_type;
        /**< PMD type */
        uint8_t digest_idx;
        /**< Index of the next slot to be used in temp_digests,
         * to store the digest for a given operation
         */
        IMB_MGR *mb_mgr;
        /**< Multi buffer manager */
        const struct rte_memzone *mb_mgr_mz;
        /**< Shared memzone for storing mb_mgr */
        __extension__ uint8_t additional_data[0];
        /**< Storing PMD specific additional data */
};

static __rte_always_inline void *
ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
{
        return (void *)qp->additional_data;
}
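
/*
 * Illustrative usage (not part of the original header): a PMD that sets
 * qp_priv_size in its ipsec_mb_internals entry can keep per-queue-pair state
 * in additional_data; "kasumi_qp_data" below is a hypothetical type.
 *
 *   struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
 */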

/** Helper function. Allocates job manager */
static __rte_always_inline IMB_MGR *
alloc_init_mb_mgr(void)
{
        IMB_MGR *mb_mgr = alloc_mb_mgr(0);

        if (unlikely(mb_mgr == NULL)) {
                IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data");
                return NULL;
        }

        init_mb_mgr_auto(mb_mgr, NULL);

        return mb_mgr;
}

/** Helper function. Gets per-thread job manager */
static __rte_always_inline IMB_MGR *
get_per_thread_mb_mgr(void)
{
        if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
                RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();

        return RTE_PER_LCORE(mb_mgr);
}
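
/*
 * Illustrative usage (not part of the original header): datapath code fetches
 * the lazily allocated per-lcore manager instead of allocating its own.
 *
 *   IMB_MGR *mgr = get_per_thread_mb_mgr();
 *   if (unlikely(mgr == NULL))
 *           return 0; (hypothetical error path in a burst handler)
 */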

/** Helper function. Gets mode and chained xforms from the xform */
static __rte_always_inline int
ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
                        enum ipsec_mb_operation *mode,
                        const struct rte_crypto_sym_xform **auth_xform,
                        const struct rte_crypto_sym_xform **cipher_xform,
                        const struct rte_crypto_sym_xform **aead_xform)
{
        const struct rte_crypto_sym_xform *next;

        if (xform == NULL) {
                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                return -ENOTSUP;
        }

        /* Only dereference xform after the NULL check above */
        next = xform->next;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                if (next == NULL) {
                        if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                                *mode = IPSEC_MB_OP_ENCRYPT_ONLY;
                                *cipher_xform = xform;
                                *auth_xform = NULL;
                                return 0;
                        }
                        *mode = IPSEC_MB_OP_DECRYPT_ONLY;
                        *cipher_xform = xform;
                        *auth_xform = NULL;
                        return 0;
                }

                if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                        if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
                                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                                return -ENOTSUP;
                        }

                        *mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
                        *cipher_xform = xform;
                        *auth_xform = xform->next;
                        return 0;
                }
                if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                *mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
                *cipher_xform = xform;
                *auth_xform = xform->next;
                return 0;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (next == NULL) {
                        if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
                                *mode = IPSEC_MB_OP_HASH_GEN_ONLY;
                                *auth_xform = xform;
                                *cipher_xform = NULL;
                                return 0;
                        }
                        *mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
                        *auth_xform = xform;
                        *cipher_xform = NULL;
                        return 0;
                }

                if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
                        if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                                return -ENOTSUP;
                        }

                        *mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
                        *auth_xform = xform;
                        *cipher_xform = xform->next;
                        return 0;
                }
                if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                *mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
                *auth_xform = xform;
                *cipher_xform = xform->next;
                return 0;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
                        /*
                         * CCM requires hashing first and ciphering later
                         * when encrypting
                         */
                        if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
                                *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
                                *aead_xform = xform;
                                return 0;
                        }
                        *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
                        *aead_xform = xform;
                        return 0;
                }
                if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
                        *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
                        *aead_xform = xform;
                        return 0;
                }
                *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
                *aead_xform = xform;
                return 0;
        }

        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
        return -ENOTSUP;
}
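
/*
 * Illustrative sketch (not part of the original header) of how a PMD's
 * session_configure callback might drive ipsec_mb_parse_xform; the switch
 * arms shown are hypothetical.
 *
 *   enum ipsec_mb_operation mode;
 *   const struct rte_crypto_sym_xform *auth = NULL, *cipher = NULL, *aead = NULL;
 *
 *   if (ipsec_mb_parse_xform(xform, &mode, &auth, &cipher, &aead) != 0)
 *           return -ENOTSUP;
 *   switch (mode) {
 *   case IPSEC_MB_OP_ENCRYPT_ONLY:
 *           (set up a cipher-only session from "cipher")
 *           break;
 *   ...
 *   }
 */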

/** Device creation function */
int
ipsec_mb_create(struct rte_vdev_device *vdev,
        enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
int
ipsec_mb_remove(struct rte_vdev_device *vdev);

/** Configure queue pair PMD type specific data */
typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);

/** Configure session PMD type specific data */
typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mb_mgr,
                void *session_private,
                const struct rte_crypto_sym_xform *xform);

/** Configure PMD type specific device internals */
typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);

/** Per PMD type operations and data */
struct ipsec_mb_internals {
        uint8_t is_configured;
        dequeue_pkt_burst_t dequeue_burst;
        ipsec_mb_dev_configure_t dev_config;
        ipsec_mb_queue_pair_configure_t queue_pair_configure;
        ipsec_mb_session_configure_t session_configure;
        const struct rte_cryptodev_capabilities *caps;
        struct rte_cryptodev_ops *ops;
        struct rte_security_ops *security_ops;
        uint64_t feature_flags;
        uint32_t session_priv_size;
        uint32_t qp_priv_size;
        uint32_t internals_priv_size;
};

/** Global PMD type specific data */
extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
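
/*
 * Illustrative sketch (not part of the original header): each PMD fills in its
 * slot of ipsec_mb_pmds[] from a constructor; the field values shown below
 * (capabilities array, ops, burst function, session type) are hypothetical
 * names.
 *
 *   RTE_INIT(ipsec_mb_set_static_parameter_kasumi)
 *   {
 *           struct ipsec_mb_internals *d = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
 *
 *           d->caps = kasumi_capabilities;
 *           d->dequeue_burst = kasumi_pmd_dequeue_burst;
 *           d->ops = &kasumi_pmd_ops;
 *           d->session_configure = kasumi_session_configure;
 *           d->session_priv_size = sizeof(struct kasumi_session);
 *   }
 */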

int
ipsec_mb_config(struct rte_cryptodev *dev,
        struct rte_cryptodev_config *config);

int
ipsec_mb_start(struct rte_cryptodev *dev);

void
ipsec_mb_stop(struct rte_cryptodev *dev);

int
ipsec_mb_close(struct rte_cryptodev *dev);

void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_stats *stats);

void
ipsec_mb_stats_reset(struct rte_cryptodev *dev);

void
ipsec_mb_info_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_info *dev_info);

int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                                 const struct rte_cryptodev_qp_conf *qp_conf,
                                 int socket_id);

/** Returns the size of the multi-buffer session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure a multi-buffer session from a crypto xform chain */
int ipsec_mb_sym_session_configure(
        struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess,
        struct rte_mempool *mempool);

/** Clear the memory of a session so it does not leave key material behind */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
                                struct rte_cryptodev_sym_session *sess);

/** Get session from op. If sessionless, create a session */
static __rte_always_inline void *
ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
{
        void *sess = NULL;
        uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
        struct rte_crypto_sym_op *sym_op = op->sym;
        uint8_t sess_type = op->sess_type;
        void *_sess;
        void *_sess_private_data = NULL;
        struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];

        switch (sess_type) {
        case RTE_CRYPTO_OP_WITH_SESSION:
                if (likely(sym_op->session != NULL))
                        sess = get_sym_session_private_data(sym_op->session,
                                                            driver_id);
                break;
        case RTE_CRYPTO_OP_SESSIONLESS:
                if (!qp->sess_mp ||
                    rte_mempool_get(qp->sess_mp, (void **)&_sess))
                        return NULL;

                if (!qp->sess_mp_priv ||
                    rte_mempool_get(qp->sess_mp_priv,
                                        (void **)&_sess_private_data)) {
                        /* Return the session object taken above */
                        rte_mempool_put(qp->sess_mp, _sess);
                        return NULL;
                }

                sess = _sess_private_data;
                if (unlikely(pmd_data->session_configure(qp->mb_mgr,
                                sess, sym_op->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
                        sess = NULL;
                        break;
                }

                sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
                set_sym_session_private_data(sym_op->session, driver_id,
                                             _sess_private_data);
                break;
        default:
                IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
        }

        if (unlikely(sess == NULL))
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

        return sess;
}
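
/*
 * Illustrative usage (not part of the original header): a PMD burst handler
 * typically resolves the session before processing the operation;
 * "kasumi_session" and "process_op" below are hypothetical names.
 *
 *   struct kasumi_session *s = ipsec_mb_get_session_private(qp, op);
 *
 *   if (unlikely(s == NULL))
 *           qp->stats.dequeue_err_count++;
 *   else
 *           process_op(qp, op, s);
 */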

#endif /* _IPSEC_MB_PRIVATE_H_ */