crypto/ipsec_mb: move zuc PMD
[dpdk.git] / drivers / crypto / ipsec_mb / ipsec_mb_private.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4
5 #ifndef _IPSEC_MB_PRIVATE_H_
6 #define _IPSEC_MB_PRIVATE_H_
7
8 #include <intel-ipsec-mb.h>
9 #include <cryptodev_pmd.h>
10 #include <rte_bus_vdev.h>
11
12 #if defined(RTE_LIB_SECURITY)
13 #define IPSEC_MB_DOCSIS_SEC_ENABLED 1
14 #include <rte_security.h>
15 #include <rte_security_driver.h>
16 #endif
17
/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64

/* Maximum length for memzone name */
#define IPSEC_MB_MAX_MZ_NAME 32

/** SIMD instruction-set level selecting the intel-ipsec-mb code path */
enum ipsec_mb_vector_mode {
	IPSEC_MB_NOT_SUPPORTED = 0, /**< No usable SIMD level detected */
	IPSEC_MB_SSE,
	IPSEC_MB_AVX,
	IPSEC_MB_AVX2,
	IPSEC_MB_AVX512
};
31
/** Vector mode shared by all ipsec_mb PMDs */
extern enum ipsec_mb_vector_mode vector_mode;

/** IMB_MGR instances, one per thread */
/* NOTE(review): RTE_DECLARE_PER_LCORE is the usual form for an extern
 * per-lcore declaration — confirm this expands as intended.
 */
extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< IPSEC Multi buffer aesni_mb PMD device name */

#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< IPSEC Multi buffer PMD aesni_gcm device name */

#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
/**< IPSEC Multi buffer PMD kasumi device name */

#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
/**< IPSEC Multi buffer PMD snow3g device name */

#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
/**< IPSEC Multi buffer PMD zuc device name */

/** PMD LOGTYPE DRIVER, common to all PMDs */
extern int ipsec_mb_logtype_driver;
/** Common log macro: prefixes function/line and appends a newline,
 * so callers should not put "\n" in fmt.
 */
#define IPSEC_MB_LOG(level, fmt, ...)                                         \
	rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
		"%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
57
/** All supported device types */
enum ipsec_mb_pmd_types {
	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
	IPSEC_MB_PMD_TYPE_AESNI_GCM,
	IPSEC_MB_PMD_TYPE_KASUMI,
	IPSEC_MB_PMD_TYPE_SNOW3G,
	IPSEC_MB_PMD_TYPE_ZUC,
	IPSEC_MB_N_PMD_TYPES /**< Number of PMD types; keep last */
};
67
/** Crypto operations (order of cipher/auth stages within one op) */
enum ipsec_mb_operation {
	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0, /**< Encrypt, then generate digest */
	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,  /**< Verify digest, then decrypt */
	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,     /**< Generate digest, then encrypt */
	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,  /**< Decrypt, then verify digest */
	IPSEC_MB_OP_ENCRYPT_ONLY,
	IPSEC_MB_OP_DECRYPT_ONLY,
	IPSEC_MB_OP_HASH_GEN_ONLY,
	IPSEC_MB_OP_HASH_VERIFY_ONLY,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
	IPSEC_MB_OP_NOT_SUPPORTED
};
82
83 extern uint8_t pmd_driver_id_aesni_mb;
84 extern uint8_t pmd_driver_id_aesni_gcm;
85 extern uint8_t pmd_driver_id_kasumi;
86 extern uint8_t pmd_driver_id_snow3g;
87 extern uint8_t pmd_driver_id_zuc;
88
89 /** Helper function. Gets driver ID based on PMD type */
90 static __rte_always_inline uint8_t
91 ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
92 {
93         switch (pmd_type) {
94         case IPSEC_MB_PMD_TYPE_AESNI_MB:
95                 return pmd_driver_id_aesni_mb;
96         case IPSEC_MB_PMD_TYPE_AESNI_GCM:
97                 return pmd_driver_id_aesni_gcm;
98         case IPSEC_MB_PMD_TYPE_KASUMI:
99                 return pmd_driver_id_kasumi;
100         case IPSEC_MB_PMD_TYPE_SNOW3G:
101                 return pmd_driver_id_snow3g;
102         case IPSEC_MB_PMD_TYPE_ZUC:
103                 return pmd_driver_id_zuc;
104         default:
105                 break;
106         }
107         return UINT8_MAX;
108 }
109
/** Common private data structure for each PMD */
struct ipsec_mb_dev_private {
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD  type */
	uint32_t max_nb_queue_pairs;
	/**< Max number of queue pairs supported by device */
	__extension__ uint8_t priv[0];
	/**< Start of PMD-specific private data (zero-length array idiom) */
};
118
/** IPSEC Multi buffer queue pair common queue pair data for all PMDs */
struct ipsec_mb_qp {
	uint16_t id;
	/**< Queue Pair Identifier */
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	/**< Queue pair name, see ipsec_mb_qp_set_unique_name() */
	struct rte_ring *ingress_queue;
	/**< Ring for placing operations ready for processing */
	struct rte_mempool *sess_mp;
	/**< Session Mempool */
	struct rte_mempool *sess_mp_priv;
	/**< Session Private Data Mempool */
	struct rte_cryptodev_stats stats;
	/**< Queue pair statistics */
	enum ipsec_mb_pmd_types pmd_type;
	/**< pmd type */
	uint8_t digest_idx;
	/**< Index of the next
	 * slot to be used in temp_digests,
	 * to store the digest for a given operation
	 */
	IMB_MGR *mb_mgr;
	/**< Multi buffer manager */
	const struct rte_memzone *mb_mgr_mz;
	/**< Shared memzone for storing mb_mgr */
	__extension__ uint8_t additional_data[0];
	/**< Storing PMD specific additional data */
};
146
147 static __rte_always_inline void *
148 ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
149 {
150         return (void *)qp->additional_data;
151 }
152
153 /** Helper function. Allocates job manager */
154 static __rte_always_inline IMB_MGR *
155 alloc_init_mb_mgr(void)
156 {
157         IMB_MGR *mb_mgr = alloc_mb_mgr(0);
158
159         if (unlikely(mb_mgr == NULL)) {
160                 IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data\n");
161                 return NULL;
162         }
163
164         init_mb_mgr_auto(mb_mgr, NULL);
165
166         return mb_mgr;
167 }
168
169 /** Helper function. Gets per thread job manager */
170 static __rte_always_inline IMB_MGR *
171 get_per_thread_mb_mgr(void)
172 {
173         if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
174                 RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();
175
176         return RTE_PER_LCORE(mb_mgr);
177 }
178
179 /** Helper function. Gets mode and chained xforms from the xform */
180 static __rte_always_inline int
181 ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
182                         enum ipsec_mb_operation *mode,
183                         const struct rte_crypto_sym_xform **auth_xform,
184                         const struct rte_crypto_sym_xform **cipher_xform,
185                         const struct rte_crypto_sym_xform **aead_xform)
186 {
187         const struct rte_crypto_sym_xform *next = xform->next;
188
189         if (xform == NULL) {
190                 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
191                 return -ENOTSUP;
192         }
193
194         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
195                 if (next == NULL) {
196                         if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
197                                 *mode = IPSEC_MB_OP_ENCRYPT_ONLY;
198                                 *cipher_xform = xform;
199                                 *auth_xform = NULL;
200                                 return 0;
201                         }
202                         *mode = IPSEC_MB_OP_DECRYPT_ONLY;
203                         *cipher_xform = xform;
204                         *auth_xform = NULL;
205                         return 0;
206                 }
207
208                 if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
209                         *mode = IPSEC_MB_OP_NOT_SUPPORTED;
210                         return -ENOTSUP;
211                 }
212
213                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
214                         if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
215                                 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
216                                 return -ENOTSUP;
217                         }
218
219                         *mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
220                         *cipher_xform = xform;
221                         *auth_xform = xform->next;
222                         return 0;
223                 }
224                 if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
225                         *mode = IPSEC_MB_OP_NOT_SUPPORTED;
226                         return -ENOTSUP;
227                 }
228
229                 *mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
230                 *cipher_xform = xform;
231                 *auth_xform = xform->next;
232                 return 0;
233         }
234
235         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
236                 if (next == NULL) {
237                         if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
238                                 *mode = IPSEC_MB_OP_HASH_GEN_ONLY;
239                                 *auth_xform = xform;
240                                 *cipher_xform = NULL;
241                                 return 0;
242                         }
243                         *mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
244                         *auth_xform = xform;
245                         *cipher_xform = NULL;
246                         return 0;
247                 }
248
249                 if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
250                         *mode = IPSEC_MB_OP_NOT_SUPPORTED;
251                         return -ENOTSUP;
252                 }
253
254                 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
255                         if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
256                                 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
257                                 return -ENOTSUP;
258                         }
259
260                         *mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
261                         *auth_xform = xform;
262                         *cipher_xform = xform->next;
263                         return 0;
264                 }
265                 if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
266                         *mode = IPSEC_MB_OP_NOT_SUPPORTED;
267                         return -ENOTSUP;
268                 }
269
270                 *mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
271                 *auth_xform = xform;
272                 *cipher_xform = xform->next;
273                 return 0;
274         }
275
276         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
277                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
278                         /*
279                          * CCM requires to hash first and cipher later
280                          * when encrypting
281                          */
282                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
283                                 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
284                                 *aead_xform = xform;
285                                 return 0;
286                                 } else {
287                                         *mode =
288                                 IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
289                                         *aead_xform = xform;
290                                         return 0;
291                                 }
292                 } else {
293                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
294                                 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
295                                 *aead_xform = xform;
296                                 return 0;
297                         }
298                         *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
299                         *aead_xform = xform;
300                         return 0;
301                 }
302         }
303
304         *mode = IPSEC_MB_OP_NOT_SUPPORTED;
305         return -ENOTSUP;
306 }
307
/** Device creation function (called from each PMD's vdev probe) */
int
ipsec_mb_create(struct rte_vdev_device *vdev,
	enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
int
ipsec_mb_remove(struct rte_vdev_device *vdev);

/** Configure queue pair PMD type specific data */
typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);

/** Configure session PMD type specific data.
 * Returns 0 on success; non-zero return is treated as failure
 * (see ipsec_mb_get_session_private()).
 */
typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mbr_mgr,
		void *session_private,
		const struct rte_crypto_sym_xform *xform);

/** Configure internals PMD type specific data */
typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);
327
/** Per PMD type operation and data */
struct ipsec_mb_internals {
	uint8_t is_configured;
	/**< Non-zero once this entry has been set up — confirm in ipsec_mb_*.c */
	dequeue_pkt_burst_t dequeue_burst;
	/**< Burst dequeue function */
	ipsec_mb_dev_configure_t dev_config;
	/**< PMD-specific device configure hook */
	ipsec_mb_queue_pair_configure_t queue_pair_configure;
	/**< PMD-specific queue pair configure hook */
	ipsec_mb_session_configure_t session_configure;
	/**< PMD-specific session configure hook */
	const struct rte_cryptodev_capabilities *caps;
	/**< Supported capabilities array */
	struct rte_cryptodev_ops *ops;
	/**< Cryptodev operations */
	struct rte_security_ops *security_ops;
	/**< Security operations (relevant when RTE_LIB_SECURITY is enabled) */
	uint64_t feature_flags;
	/**< Cryptodev feature flags */
	uint32_t session_priv_size;
	/**< Size of the PMD-specific session private data */
	uint32_t qp_priv_size;
	/**< Size of the PMD-specific queue pair additional data */
	uint32_t internals_priv_size;
	/**< Size of the PMD-specific device private data */
};
343
/** Global PMD type specific data, indexed by enum ipsec_mb_pmd_types */
extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];

/** Configure device (common rte_cryptodev_ops handler) */
int
ipsec_mb_config(struct rte_cryptodev *dev,
	struct rte_cryptodev_config *config);

/** Start device */
int
ipsec_mb_start(struct rte_cryptodev *dev);

/** Stop device */
void
ipsec_mb_stop(struct rte_cryptodev *dev);

/** Close device */
int
ipsec_mb_close(struct rte_cryptodev *dev);

/** Get device statistics */
void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats);

/** Reset device statistics */
void
ipsec_mb_stats_reset(struct rte_cryptodev *dev);

/** Get device info */
void
ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);

/** Release a queue pair's resources */
int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

/** Set a unique name for the queue pair (fills qp->name) */
int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

/** Setup a queue pair */
int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				 const struct rte_cryptodev_qp_conf *qp_conf,
				 int socket_id);

/** Returns the size of the aesni multi-buffer session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure an aesni multi-buffer session from a crypto xform chain */
int ipsec_mb_sym_session_configure(
	struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool);

/** Clear the memory of session so it does not leave key material behind */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
				struct rte_cryptodev_sym_session *sess);
397
398 /** Get session from op. If sessionless create a session */
399 static __rte_always_inline void *
400 ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
401 {
402         void *sess = NULL;
403         uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
404         struct rte_crypto_sym_op *sym_op = op->sym;
405         uint8_t sess_type = op->sess_type;
406         void *_sess;
407         void *_sess_private_data = NULL;
408         struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];
409
410         switch (sess_type) {
411         case RTE_CRYPTO_OP_WITH_SESSION:
412                 if (likely(sym_op->session != NULL))
413                         sess = get_sym_session_private_data(sym_op->session,
414                                                             driver_id);
415         break;
416         case RTE_CRYPTO_OP_SESSIONLESS:
417                 if (!qp->sess_mp ||
418                     rte_mempool_get(qp->sess_mp, (void **)&_sess))
419                         return NULL;
420
421                 if (!qp->sess_mp_priv ||
422                     rte_mempool_get(qp->sess_mp_priv,
423                                         (void **)&_sess_private_data))
424                         return NULL;
425
426                 sess = _sess_private_data;
427                 if (unlikely(pmd_data->session_configure(qp->mb_mgr,
428                                 sess, sym_op->xform) != 0)) {
429                         rte_mempool_put(qp->sess_mp, _sess);
430                         rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
431                         sess = NULL;
432                 }
433
434                 sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
435                 set_sym_session_private_data(sym_op->session, driver_id,
436                                              _sess_private_data);
437         break;
438         default:
439                 IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
440         }
441
442         if (unlikely(sess == NULL))
443                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
444
445         return sess;
446 }
447
448 #endif /* _IPSEC_MB_PRIVATE_H_ */