drivers/crypto/ipsec_mb/ipsec_mb_private.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#ifndef _IPSEC_MB_PRIVATE_H_
#define _IPSEC_MB_PRIVATE_H_

#include <intel-ipsec-mb.h>
#include <cryptodev_pmd.h>
#include <rte_bus_vdev.h>

#if defined(RTE_LIB_SECURITY)
#define IPSEC_MB_DOCSIS_SEC_ENABLED 1
#include <rte_security.h>
#include <rte_security_driver.h>
#endif

/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64

/* Maximum length for memzone name */
#define IPSEC_MB_MAX_MZ_NAME 32

enum ipsec_mb_vector_mode {
	IPSEC_MB_NOT_SUPPORTED = 0,
	IPSEC_MB_SSE,
	IPSEC_MB_AVX,
	IPSEC_MB_AVX2,
	IPSEC_MB_AVX512
};

extern enum ipsec_mb_vector_mode vector_mode;

/** IMB_MGR instances, one per thread */
extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< IPSEC Multi buffer aesni_mb PMD device name */

#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< IPSEC Multi buffer aesni_gcm PMD device name */

/** PMD LOGTYPE DRIVER, common to all PMDs */
extern int ipsec_mb_logtype_driver;
#define IPSEC_MB_LOG(level, fmt, ...)                                         \
	rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
		"%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)

/** All supported device types */
enum ipsec_mb_pmd_types {
	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
	IPSEC_MB_PMD_TYPE_AESNI_GCM,
	IPSEC_MB_N_PMD_TYPES
};

/** Crypto operations */
enum ipsec_mb_operation {
	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
	IPSEC_MB_OP_ENCRYPT_ONLY,
	IPSEC_MB_OP_DECRYPT_ONLY,
	IPSEC_MB_OP_HASH_GEN_ONLY,
	IPSEC_MB_OP_HASH_VERIFY_ONLY,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
	IPSEC_MB_OP_NOT_SUPPORTED
};

extern uint8_t pmd_driver_id_aesni_mb;
extern uint8_t pmd_driver_id_aesni_gcm;

/** Helper function. Gets driver ID based on PMD type */
static __rte_always_inline uint8_t
ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
{
	switch (pmd_type) {
	case IPSEC_MB_PMD_TYPE_AESNI_MB:
		return pmd_driver_id_aesni_mb;
	case IPSEC_MB_PMD_TYPE_AESNI_GCM:
		return pmd_driver_id_aesni_gcm;
	default:
		break;
	}
	return UINT8_MAX;
}

/** Common private data structure for each PMD */
struct ipsec_mb_dev_private {
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD type */
	uint32_t max_nb_queue_pairs;
	/**< Max number of queue pairs supported by device */
	__extension__ uint8_t priv[0];
};

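/*
 * Layout sketch (illustrative only, not part of the upstream header): the
 * per-device private area is expected to start with struct
 * ipsec_mb_dev_private, with the PMD specific internals following in priv[].
 * The internals struct name below is hypothetical:
 *
 *	struct ipsec_mb_dev_private *dev_priv = dev->data->dev_private;
 *	struct pmd_specific_internals *internals = (void *)dev_priv->priv;
 */
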
/** IPSEC Multi buffer common queue pair data for all PMDs */
struct ipsec_mb_qp {
	uint16_t id;
	/**< Queue Pair Identifier */
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_ring *ingress_queue;
	/**< Ring for placing operations ready for processing */
	struct rte_mempool *sess_mp;
	/**< Session Mempool */
	struct rte_mempool *sess_mp_priv;
	/**< Session Private Data Mempool */
	struct rte_cryptodev_stats stats;
	/**< Queue pair statistics */
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD type */
	uint8_t digest_idx;
	/**< Index of the next slot to be used in temp_digests,
	 * to store the digest for a given operation
	 */
	IMB_MGR *mb_mgr;
	/**< Multi buffer manager */
	const struct rte_memzone *mb_mgr_mz;
	/**< Shared memzone for storing mb_mgr */
	__extension__ uint8_t additional_data[0];
	/**< Storing PMD specific additional data */
};

static __rte_always_inline void *
ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
{
	return (void *)qp->additional_data;
}

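/*
 * Usage sketch (illustrative only): a PMD casts the trailing additional_data
 * area to its own queue pair private structure. The struct name below is
 * hypothetical:
 *
 *	struct pmd_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
 */
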
/** Helper function. Allocates job manager */
static __rte_always_inline IMB_MGR *
alloc_init_mb_mgr(void)
{
	IMB_MGR *mb_mgr = alloc_mb_mgr(0);

	if (unlikely(mb_mgr == NULL)) {
		IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data");
		return NULL;
	}

	init_mb_mgr_auto(mb_mgr, NULL);

	return mb_mgr;
}

/** Helper function. Gets per thread job manager */
static __rte_always_inline IMB_MGR *
get_per_thread_mb_mgr(void)
{
	if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
		RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();

	return RTE_PER_LCORE(mb_mgr);
}

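/*
 * Usage sketch (illustrative only): datapath code fetches the per-lcore
 * manager lazily and bails out if allocation failed, e.g. by reporting zero
 * processed operations:
 *
 *	IMB_MGR *mb_mgr = get_per_thread_mb_mgr();
 *
 *	if (unlikely(mb_mgr == NULL))
 *		return 0;
 */
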
/** Helper function. Gets mode and chained xforms from the xform */
static __rte_always_inline int
ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
			enum ipsec_mb_operation *mode,
			const struct rte_crypto_sym_xform **auth_xform,
			const struct rte_crypto_sym_xform **cipher_xform,
			const struct rte_crypto_sym_xform **aead_xform)
{
	const struct rte_crypto_sym_xform *next;

	if (xform == NULL) {
		*mode = IPSEC_MB_OP_NOT_SUPPORTED;
		return -ENOTSUP;
	}

	/* Only dereference the xform after the NULL check above */
	next = xform->next;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (next == NULL) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
				*mode = IPSEC_MB_OP_ENCRYPT_ONLY;
				*cipher_xform = xform;
				*auth_xform = NULL;
				return 0;
			}
			*mode = IPSEC_MB_OP_DECRYPT_ONLY;
			*cipher_xform = xform;
			*auth_xform = NULL;
			return 0;
		}

		if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
				*mode = IPSEC_MB_OP_NOT_SUPPORTED;
				return -ENOTSUP;
			}

			*mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
			*cipher_xform = xform;
			*auth_xform = xform->next;
			return 0;
		}
		if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		*mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
		*cipher_xform = xform;
		*auth_xform = xform->next;
		return 0;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (next == NULL) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
				*mode = IPSEC_MB_OP_HASH_GEN_ONLY;
				*auth_xform = xform;
				*cipher_xform = NULL;
				return 0;
			}
			*mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
			*auth_xform = xform;
			*cipher_xform = NULL;
			return 0;
		}

		if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
				*mode = IPSEC_MB_OP_NOT_SUPPORTED;
				return -ENOTSUP;
			}

			*mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
			*auth_xform = xform;
			*cipher_xform = xform->next;
			return 0;
		}
		if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		*mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
		*auth_xform = xform;
		*cipher_xform = xform->next;
		return 0;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			/*
			 * CCM requires hashing first and ciphering later
			 * when encrypting, hence the encrypt/decrypt modes
			 * are swapped for CCM.
			 */
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
				*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
			else
				*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
			*aead_xform = xform;
			return 0;
		}

		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
			*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
		else
			*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
		*aead_xform = xform;
		return 0;
	}

	*mode = IPSEC_MB_OP_NOT_SUPPORTED;
	return -ENOTSUP;
}

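/*
 * Usage sketch (illustrative only): classifying an xform chain during
 * session configuration. All variables below belong to the hypothetical
 * caller:
 *
 *	enum ipsec_mb_operation mode;
 *	const struct rte_crypto_sym_xform *auth_xform = NULL;
 *	const struct rte_crypto_sym_xform *cipher_xform = NULL;
 *	const struct rte_crypto_sym_xform *aead_xform = NULL;
 *	int ret;
 *
 *	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
 *			&cipher_xform, &aead_xform);
 *	if (ret != 0)
 *		return ret;
 */
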
/** Device creation function */
int
ipsec_mb_create(struct rte_vdev_device *vdev,
	enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
int
ipsec_mb_remove(struct rte_vdev_device *vdev);

/** Configure queue pair PMD type specific data */
typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);

/** Configure session PMD type specific data */
typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mb_mgr,
		void *session_private,
		const struct rte_crypto_sym_xform *xform);

/** Configure internals PMD type specific data */
typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);

/** Per PMD type operation and data */
struct ipsec_mb_internals {
	uint8_t is_configured;
	dequeue_pkt_burst_t dequeue_burst;
	ipsec_mb_dev_configure_t dev_config;
	ipsec_mb_queue_pair_configure_t queue_pair_configure;
	ipsec_mb_session_configure_t session_configure;
	const struct rte_cryptodev_capabilities *caps;
	struct rte_cryptodev_ops *ops;
	struct rte_security_ops *security_ops;
	uint64_t feature_flags;
	uint32_t session_priv_size;
	uint32_t qp_priv_size;
	uint32_t internals_priv_size;
};

/** Global PMD type specific data */
extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];

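/*
 * Registration sketch (illustrative only; the callback and capability names
 * below are hypothetical and PMD specific): a PMD can fill its
 * ipsec_mb_pmds[] slot, for instance from an RTE_INIT constructor:
 *
 *	RTE_INIT(ipsec_mb_set_pmd_meta_example)
 *	{
 *		struct ipsec_mb_internals *data =
 *			&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];
 *
 *		data->caps = example_capabilities;
 *		data->dequeue_burst = example_dequeue_burst;
 *		data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO;
 *		data->session_priv_size = sizeof(struct example_session);
 *	}
 */
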
int
ipsec_mb_config(struct rte_cryptodev *dev,
	struct rte_cryptodev_config *config);

int
ipsec_mb_start(struct rte_cryptodev *dev);

void
ipsec_mb_stop(struct rte_cryptodev *dev);

int
ipsec_mb_close(struct rte_cryptodev *dev);

void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats);

void
ipsec_mb_stats_reset(struct rte_cryptodev *dev);

void
ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);

int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				 const struct rte_cryptodev_qp_conf *qp_conf,
				 int socket_id);

/** Returns the size of the aesni multi-buffer session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure an aesni multi-buffer session from a crypto xform chain */
int ipsec_mb_sym_session_configure(
	struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool);

/** Clear the memory of session so it does not leave key material behind */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
				struct rte_cryptodev_sym_session *sess);

/** Get session from op. If sessionless, create a session */
static __rte_always_inline void *
ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
{
	void *sess = NULL;
	uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t sess_type = op->sess_type;
	void *_sess;
	void *_sess_private_data = NULL;
	struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];

	switch (sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		if (likely(sym_op->session != NULL))
			sess = get_sym_session_private_data(sym_op->session,
							    driver_id);
		break;
	case RTE_CRYPTO_OP_SESSIONLESS:
		if (!qp->sess_mp ||
		    rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (!qp->sess_mp_priv ||
		    rte_mempool_get(qp->sess_mp_priv,
					(void **)&_sess_private_data)) {
			/* Return the session object taken above */
			rte_mempool_put(qp->sess_mp, _sess);
			return NULL;
		}

		sess = _sess_private_data;
		if (unlikely(pmd_data->session_configure(qp->mb_mgr,
				sess, sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
			break;
		}

		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(sym_op->session, driver_id,
					     _sess_private_data);
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}
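
/*
 * Usage sketch (illustrative only): a dequeue routine resolves the session
 * private data for each operation before building the IMB job. The session
 * struct name below is PMD specific and hypothetical:
 *
 *	struct pmd_session *sess = ipsec_mb_get_session_private(qp, op);
 *
 *	if (unlikely(sess == NULL))
 *		continue;
 */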

#endif /* _IPSEC_MB_PRIVATE_H_ */