crypto/ipsec_mb: add chacha_poly PMD
drivers/crypto/ipsec_mb/ipsec_mb_private.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#ifndef _IPSEC_MB_PRIVATE_H_
#define _IPSEC_MB_PRIVATE_H_

#include <intel-ipsec-mb.h>
#include <cryptodev_pmd.h>
#include <rte_bus_vdev.h>

#if defined(RTE_LIB_SECURITY)
#define IPSEC_MB_DOCSIS_SEC_ENABLED 1
#include <rte_security.h>
#include <rte_security_driver.h>
#endif

/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64

/* Maximum length for memzone name */
#define IPSEC_MB_MAX_MZ_NAME 32

enum ipsec_mb_vector_mode {
        IPSEC_MB_NOT_SUPPORTED = 0,
        IPSEC_MB_SSE,
        IPSEC_MB_AVX,
        IPSEC_MB_AVX2,
        IPSEC_MB_AVX512
};

extern enum ipsec_mb_vector_mode vector_mode;

/** IMB_MGR instances, one per thread */
RTE_DECLARE_PER_LCORE(IMB_MGR *, mb_mgr);

#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< IPSEC Multi buffer aesni_mb PMD device name */

#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< IPSEC Multi buffer PMD aesni_gcm device name */

#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
/**< IPSEC Multi buffer PMD kasumi device name */

#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
/**< IPSEC Multi buffer PMD snow3g device name */

#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
/**< IPSEC Multi buffer PMD zuc device name */

#define CRYPTODEV_NAME_CHACHA20_POLY1305_PMD crypto_chacha20_poly1305
/**< IPSEC Multi buffer PMD chacha20_poly1305 device name */
/** Driver log type, common to all PMDs */
extern int ipsec_mb_logtype_driver;
#define IPSEC_MB_LOG(level, fmt, ...)                                         \
        rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
                "%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
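
/*
 * Example usage (illustrative; the message and arguments are made up):
 *
 *   IPSEC_MB_LOG(ERR, "queue pair %u setup failed", qp_id);
 *
 * This expands to an rte_log() call on the shared driver log type,
 * prefixed with the calling function name and line number.
 */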

/** All supported device types */
enum ipsec_mb_pmd_types {
        IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
        IPSEC_MB_PMD_TYPE_AESNI_GCM,
        IPSEC_MB_PMD_TYPE_KASUMI,
        IPSEC_MB_PMD_TYPE_SNOW3G,
        IPSEC_MB_PMD_TYPE_ZUC,
        IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305,
        IPSEC_MB_N_PMD_TYPES
};

/** Crypto operations */
enum ipsec_mb_operation {
        IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
        IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
        IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
        IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
        IPSEC_MB_OP_ENCRYPT_ONLY,
        IPSEC_MB_OP_DECRYPT_ONLY,
        IPSEC_MB_OP_HASH_GEN_ONLY,
        IPSEC_MB_OP_HASH_VERIFY_ONLY,
        IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
        IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
        IPSEC_MB_OP_NOT_SUPPORTED
};

extern uint8_t pmd_driver_id_aesni_mb;
extern uint8_t pmd_driver_id_aesni_gcm;
extern uint8_t pmd_driver_id_kasumi;
extern uint8_t pmd_driver_id_snow3g;
extern uint8_t pmd_driver_id_zuc;
extern uint8_t pmd_driver_id_chacha20_poly1305;

/** Helper function. Gets driver ID based on PMD type */
static __rte_always_inline uint8_t
ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
{
        switch (pmd_type) {
        case IPSEC_MB_PMD_TYPE_AESNI_MB:
                return pmd_driver_id_aesni_mb;
        case IPSEC_MB_PMD_TYPE_AESNI_GCM:
                return pmd_driver_id_aesni_gcm;
        case IPSEC_MB_PMD_TYPE_KASUMI:
                return pmd_driver_id_kasumi;
        case IPSEC_MB_PMD_TYPE_SNOW3G:
                return pmd_driver_id_snow3g;
        case IPSEC_MB_PMD_TYPE_ZUC:
                return pmd_driver_id_zuc;
        case IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305:
                return pmd_driver_id_chacha20_poly1305;
        default:
                break;
        }
        return UINT8_MAX;
}

/** Common private data structure for each PMD */
struct ipsec_mb_dev_private {
        enum ipsec_mb_pmd_types pmd_type;
        /**< PMD type */
        uint32_t max_nb_queue_pairs;
        /**< Max number of queue pairs supported by device */
        __extension__ uint8_t priv[0];
};
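
/*
 * The zero-length priv[] member lets each PMD append its own private data
 * after this common header; internals_priv_size in struct
 * ipsec_mb_internals (below) tells the framework how much extra room to
 * allocate. A minimal access sketch, assuming a hypothetical PMD-specific
 * struct my_pmd_private:
 *
 *   struct ipsec_mb_dev_private *dp = dev->data->dev_private;
 *   struct my_pmd_private *priv = (struct my_pmd_private *)dp->priv;
 */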

/** IPSEC Multi buffer queue pair data, common to all PMDs */
struct ipsec_mb_qp {
        uint16_t id;
        /**< Queue Pair Identifier */
        char name[RTE_CRYPTODEV_NAME_MAX_LEN];
        /**< Unique Queue Pair Name */
        struct rte_ring *ingress_queue;
        /**< Ring for placing operations ready for processing */
        struct rte_mempool *sess_mp;
        /**< Session Mempool */
        struct rte_mempool *sess_mp_priv;
        /**< Session Private Data Mempool */
        struct rte_cryptodev_stats stats;
        /**< Queue pair statistics */
        enum ipsec_mb_pmd_types pmd_type;
        /**< PMD type */
        uint8_t digest_idx;
        /**< Index of the next slot to be used in temp_digests,
         * to store the digest for a given operation
         */
        IMB_MGR *mb_mgr;
        /**< Multi buffer manager */
        const struct rte_memzone *mb_mgr_mz;
        /**< Shared memzone for storing mb_mgr */
        __extension__ uint8_t additional_data[0];
        /**< Storing PMD specific additional data */
};

static __rte_always_inline void *
ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
{
        return (void *)qp->additional_data;
}
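
/*
 * PMDs that need per-queue-pair state reserve qp_priv_size bytes (see
 * struct ipsec_mb_internals below) and fetch them through the helper
 * above. Sketch, with a hypothetical struct my_pmd_qp_data:
 *
 *   struct my_pmd_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
 */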

/** Helper function. Allocates job manager */
static __rte_always_inline IMB_MGR *
alloc_init_mb_mgr(void)
{
        IMB_MGR *mb_mgr = alloc_mb_mgr(0);

        if (unlikely(mb_mgr == NULL)) {
                IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data");
                return NULL;
        }

        init_mb_mgr_auto(mb_mgr, NULL);

        return mb_mgr;
}

/** Helper function. Gets per thread job manager */
static __rte_always_inline IMB_MGR *
get_per_thread_mb_mgr(void)
{
        if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
                RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();

        return RTE_PER_LCORE(mb_mgr);
}
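
/*
 * Datapath code fetches the lcore-local manager on each burst; the first
 * call on a thread allocates and initializes it lazily. Usage sketch
 * (the surrounding burst function is hypothetical):
 *
 *   IMB_MGR *mb_mgr = get_per_thread_mb_mgr();
 *   if (unlikely(mb_mgr == NULL))
 *           return 0;
 */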

/** Helper function. Gets mode and chained xforms from the xform */
static __rte_always_inline int
ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
                        enum ipsec_mb_operation *mode,
                        const struct rte_crypto_sym_xform **auth_xform,
                        const struct rte_crypto_sym_xform **cipher_xform,
                        const struct rte_crypto_sym_xform **aead_xform)
{
        const struct rte_crypto_sym_xform *next;

        if (xform == NULL) {
                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                return -ENOTSUP;
        }

        /* Only dereference xform after the NULL check above */
        next = xform->next;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                if (next == NULL) {
                        if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                                *mode = IPSEC_MB_OP_ENCRYPT_ONLY;
                                *cipher_xform = xform;
                                *auth_xform = NULL;
                                return 0;
                        }
                        *mode = IPSEC_MB_OP_DECRYPT_ONLY;
                        *cipher_xform = xform;
                        *auth_xform = NULL;
                        return 0;
                }

                if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                        if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
                                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                                return -ENOTSUP;
                        }

                        *mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
                        *cipher_xform = xform;
                        *auth_xform = xform->next;
                        return 0;
                }
                if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                *mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
                *cipher_xform = xform;
                *auth_xform = xform->next;
                return 0;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (next == NULL) {
                        if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
                                *mode = IPSEC_MB_OP_HASH_GEN_ONLY;
                                *auth_xform = xform;
                                *cipher_xform = NULL;
                                return 0;
                        }
                        *mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
                        *auth_xform = xform;
                        *cipher_xform = NULL;
                        return 0;
                }

                if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
                        if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                                return -ENOTSUP;
                        }

                        *mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
                        *auth_xform = xform;
                        *cipher_xform = xform->next;
                        return 0;
                }
                if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                *mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
                *auth_xform = xform;
                *cipher_xform = xform->next;
                return 0;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
                        /*
                         * CCM requires to hash first and cipher later
                         * when encrypting, hence the swapped mode
                         */
                        if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
                                *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
                                *aead_xform = xform;
                                return 0;
                        }
                        *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
                        *aead_xform = xform;
                        return 0;
                }

                if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
                        *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
                        *aead_xform = xform;
                        return 0;
                }
                *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
                *aead_xform = xform;
                return 0;
        }

        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
        return -ENOTSUP;
}
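
/*
 * A PMD session_configure callback would typically start by classifying
 * the xform chain with the helper above. Minimal sketch for an AEAD-only
 * PMD (error handling and key programming elided):
 *
 *   enum ipsec_mb_operation mode;
 *   const struct rte_crypto_sym_xform *auth_xform = NULL;
 *   const struct rte_crypto_sym_xform *cipher_xform = NULL;
 *   const struct rte_crypto_sym_xform *aead_xform = NULL;
 *   int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
 *                                  &cipher_xform, &aead_xform);
 *
 *   if (ret != 0 || aead_xform == NULL)
 *           return -ENOTSUP;
 */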

/** Device creation function */
int
ipsec_mb_create(struct rte_vdev_device *vdev,
        enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
int
ipsec_mb_remove(struct rte_vdev_device *vdev);

/** Configure queue pair PMD type specific data */
typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);

/** Configure session PMD type specific data */
typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mb_mgr,
                void *session_private,
                const struct rte_crypto_sym_xform *xform);

/** Configure internals PMD type specific data */
typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);

/** Per PMD type operation and data */
struct ipsec_mb_internals {
        uint8_t is_configured;
        dequeue_pkt_burst_t dequeue_burst;
        ipsec_mb_dev_configure_t dev_config;
        ipsec_mb_queue_pair_configure_t queue_pair_configure;
        ipsec_mb_session_configure_t session_configure;
        const struct rte_cryptodev_capabilities *caps;
        struct rte_cryptodev_ops *ops;
        struct rte_security_ops *security_ops;
        uint64_t feature_flags;
        uint32_t session_priv_size;
        uint32_t qp_priv_size;
        uint32_t internals_priv_size;
};

/** Global PMD type specific data */
extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
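
/*
 * Each PMD fills in its slot of ipsec_mb_pmds[] at constructor time.
 * A minimal sketch, assuming hypothetical chacha20_poly1305 symbols:
 *
 *   RTE_INIT(ipsec_mb_set_ops_chacha20_poly1305)
 *   {
 *           struct ipsec_mb_internals *p =
 *                   &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
 *
 *           p->caps = chacha20_poly1305_capabilities;
 *           p->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
 *           p->ops = &chacha20_poly1305_pmd_ops;
 *           p->session_configure = chacha20_poly1305_session_configure;
 *           p->session_priv_size =
 *                   sizeof(struct chacha20_poly1305_session);
 *   }
 */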

int
ipsec_mb_config(struct rte_cryptodev *dev,
        struct rte_cryptodev_config *config);

int
ipsec_mb_start(struct rte_cryptodev *dev);

void
ipsec_mb_stop(struct rte_cryptodev *dev);

int
ipsec_mb_close(struct rte_cryptodev *dev);

void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_stats *stats);

void
ipsec_mb_stats_reset(struct rte_cryptodev *dev);

void
ipsec_mb_info_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_info *dev_info);

int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                                 const struct rte_cryptodev_qp_conf *qp_conf,
                                 int socket_id);

/** Returns the size of the multi-buffer session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure a multi-buffer session from a crypto xform chain */
int ipsec_mb_sym_session_configure(
        struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess,
        struct rte_mempool *mempool);

/** Clear the memory of session so it does not leave key material behind */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
                                struct rte_cryptodev_sym_session *sess);

/** Get session from op. If sessionless, create a session */
static __rte_always_inline void *
ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
{
        void *sess = NULL;
        uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
        struct rte_crypto_sym_op *sym_op = op->sym;
        uint8_t sess_type = op->sess_type;
        void *_sess;
        void *_sess_private_data = NULL;
        struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];

        switch (sess_type) {
        case RTE_CRYPTO_OP_WITH_SESSION:
                if (likely(sym_op->session != NULL))
                        sess = get_sym_session_private_data(sym_op->session,
                                                            driver_id);
                break;
        case RTE_CRYPTO_OP_SESSIONLESS:
                if (!qp->sess_mp ||
                    rte_mempool_get(qp->sess_mp, (void **)&_sess))
                        return NULL;

                if (!qp->sess_mp_priv ||
                    rte_mempool_get(qp->sess_mp_priv,
                                        (void **)&_sess_private_data)) {
                        /* Put the session header back before failing */
                        rte_mempool_put(qp->sess_mp, _sess);
                        return NULL;
                }

                sess = _sess_private_data;
                if (unlikely(pmd_data->session_configure(qp->mb_mgr,
                                sess, sym_op->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
                        sess = NULL;
                        break;
                }

                sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
                set_sym_session_private_data(sym_op->session, driver_id,
                                             _sess_private_data);
                break;
        default:
                IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
        }

        if (unlikely(sess == NULL))
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

        return sess;
}
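
/*
 * Typical use inside a PMD dequeue burst (sketch; the session struct is
 * hypothetical). On failure the helper already sets op->status to
 * RTE_CRYPTO_OP_STATUS_INVALID_SESSION, so the caller only skips the op:
 *
 *   struct my_pmd_session *sess = ipsec_mb_get_session_private(qp, op);
 *   if (unlikely(sess == NULL))
 *           continue;
 */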

#endif /* _IPSEC_MB_PRIVATE_H_ */