crypto/ipsec_mb: move aesni_mb PMD
drivers/crypto/ipsec_mb/ipsec_mb_private.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#ifndef _IPSEC_MB_PRIVATE_H_
#define _IPSEC_MB_PRIVATE_H_

#include <intel-ipsec-mb.h>
#include <cryptodev_pmd.h>
#include <rte_bus_vdev.h>

#if defined(RTE_LIB_SECURITY)
#define IPSEC_MB_DOCSIS_SEC_ENABLED 1
#include <rte_security.h>
#include <rte_security_driver.h>
#endif

/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64

/* Maximum length for memzone name */
#define IPSEC_MB_MAX_MZ_NAME 32

enum ipsec_mb_vector_mode {
        IPSEC_MB_NOT_SUPPORTED = 0,
        IPSEC_MB_SSE,
        IPSEC_MB_AVX,
        IPSEC_MB_AVX2,
        IPSEC_MB_AVX512
};

extern enum ipsec_mb_vector_mode vector_mode;

/** IMB_MGR instances, one per thread */
extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< IPSEC Multi buffer aesni_mb PMD device name */

/** PMD logtype driver, common to all PMDs */
extern int ipsec_mb_logtype_driver;
#define IPSEC_MB_LOG(level, fmt, ...)                                         \
        rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
                "%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
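
/*
 * Usage sketch (illustrative, not part of the framework API): the macro
 * already prefixes every message with the calling function and line and
 * appends the newline, so call sites pass only the level token and a
 * printf-style format, e.g.:
 *
 *     IPSEC_MB_LOG(ERR, "queue pair %u: invalid session", qp_id);
 */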

/** All supported device types */
enum ipsec_mb_pmd_types {
        IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
        IPSEC_MB_N_PMD_TYPES
};

/** Crypto operations */
enum ipsec_mb_operation {
        IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
        IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
        IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
        IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
        IPSEC_MB_OP_ENCRYPT_ONLY,
        IPSEC_MB_OP_DECRYPT_ONLY,
        IPSEC_MB_OP_HASH_GEN_ONLY,
        IPSEC_MB_OP_HASH_VERIFY_ONLY,
        IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
        IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
        IPSEC_MB_OP_NOT_SUPPORTED
};

extern uint8_t pmd_driver_id_aesni_mb;

/** Helper function. Gets driver ID based on PMD type */
static __rte_always_inline uint8_t
ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
{
        switch (pmd_type) {
        case IPSEC_MB_PMD_TYPE_AESNI_MB:
                return pmd_driver_id_aesni_mb;
        default:
                break;
        }
        return UINT8_MAX;
}

/** Common private data structure for each PMD */
struct ipsec_mb_dev_private {
        enum ipsec_mb_pmd_types pmd_type;
        /**< PMD type */
        uint32_t max_nb_queue_pairs;
        /**< Max number of queue pairs supported by device */
        __extension__ uint8_t priv[0];
};

/** IPSEC Multi buffer queue pair data, common to all PMDs */
struct ipsec_mb_qp {
        uint16_t id;
        /**< Queue Pair Identifier */
        char name[RTE_CRYPTODEV_NAME_MAX_LEN];
        /**< Unique Queue Pair Name */
        struct rte_ring *ingress_queue;
        /**< Ring for placing operations ready for processing */
        struct rte_mempool *sess_mp;
        /**< Session Mempool */
        struct rte_mempool *sess_mp_priv;
        /**< Session Private Data Mempool */
        struct rte_cryptodev_stats stats;
        /**< Queue pair statistics */
        enum ipsec_mb_pmd_types pmd_type;
        /**< PMD type */
        uint8_t digest_idx;
        /**< Index of the next slot to be used in temp_digests,
         * to store the digest for a given operation
         */
        IMB_MGR *mb_mgr;
        /**< Multi buffer manager */
        const struct rte_memzone *mb_mgr_mz;
        /**< Shared memzone for storing mb_mgr */
        __extension__ uint8_t additional_data[0];
        /**< Storing PMD specific additional data */
};

static __rte_always_inline void *
ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
{
        return (void *)qp->additional_data;
}
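
/*
 * Usage sketch (illustrative only): a PMD layers its own per-queue-pair
 * state on top of additional_data, which is sized by qp_priv_size at queue
 * pair setup. The PMD-side struct shown here is a hypothetical example:
 *
 *     struct example_qp_data {
 *             uint8_t temp_digests[IMB_MAX_JOBS][DIGEST_LENGTH_MAX];
 *     };
 *
 *     struct example_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
 */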

/** Helper function. Allocates job manager */
static __rte_always_inline IMB_MGR *
alloc_init_mb_mgr(void)
{
        IMB_MGR *mb_mgr = alloc_mb_mgr(0);

        if (unlikely(mb_mgr == NULL)) {
                IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data");
                return NULL;
        }

        init_mb_mgr_auto(mb_mgr, NULL);

        return mb_mgr;
}

/** Helper function. Gets per thread job manager */
static __rte_always_inline IMB_MGR *
get_per_thread_mb_mgr(void)
{
        if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
                RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();

        return RTE_PER_LCORE(mb_mgr);
}
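
/*
 * Usage sketch (illustrative only): datapath code fetches the lazily
 * allocated per-lcore manager at the top of each burst; the first call on
 * a given lcore allocates and initializes it. The function name below is
 * hypothetical:
 *
 *     static uint16_t
 *     example_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 *                     uint16_t nb_ops)
 *     {
 *             IMB_MGR *mb_mgr = get_per_thread_mb_mgr();
 *
 *             if (unlikely(mb_mgr == NULL))
 *                     return 0;
 *             ...
 *     }
 */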

/** Helper function. Gets mode and chained xforms from the xform */
static __rte_always_inline int
ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
                        enum ipsec_mb_operation *mode,
                        const struct rte_crypto_sym_xform **auth_xform,
                        const struct rte_crypto_sym_xform **cipher_xform,
                        const struct rte_crypto_sym_xform **aead_xform)
{
        const struct rte_crypto_sym_xform *next;

        if (xform == NULL) {
                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                return -ENOTSUP;
        }

        next = xform->next;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                if (next == NULL) {
                        if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                                *mode = IPSEC_MB_OP_ENCRYPT_ONLY;
                                *cipher_xform = xform;
                                *auth_xform = NULL;
                                return 0;
                        }
                        *mode = IPSEC_MB_OP_DECRYPT_ONLY;
                        *cipher_xform = xform;
                        *auth_xform = NULL;
                        return 0;
                }

                if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                        if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
                                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                                return -ENOTSUP;
                        }

                        *mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
                        *cipher_xform = xform;
                        *auth_xform = xform->next;
                        return 0;
                }
                if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                *mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
                *cipher_xform = xform;
                *auth_xform = xform->next;
                return 0;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (next == NULL) {
                        if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
                                *mode = IPSEC_MB_OP_HASH_GEN_ONLY;
                                *auth_xform = xform;
                                *cipher_xform = NULL;
                                return 0;
                        }
                        *mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
                        *auth_xform = xform;
                        *cipher_xform = NULL;
                        return 0;
                }

                if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
                        if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                                *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                                return -ENOTSUP;
                        }

                        *mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
                        *auth_xform = xform;
                        *cipher_xform = xform->next;
                        return 0;
                }
                if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
                        return -ENOTSUP;
                }

                *mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
                *auth_xform = xform;
                *cipher_xform = xform->next;
                return 0;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
                        /*
                         * CCM requires hashing first and ciphering later
                         * when encrypting, so the operation modes are
                         * deliberately swapped for it.
                         */
                        if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
                                *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
                        else
                                *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
                        *aead_xform = xform;
                        return 0;
                }

                if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
                        *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
                else
                        *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
                *aead_xform = xform;
                return 0;
        }

        *mode = IPSEC_MB_OP_NOT_SUPPORTED;
        return -ENOTSUP;
}
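
/*
 * Usage sketch (illustrative only): a PMD session setup path classifies
 * the xform chain first, then dispatches on the returned mode:
 *
 *     enum ipsec_mb_operation mode;
 *     const struct rte_crypto_sym_xform *auth_xform = NULL;
 *     const struct rte_crypto_sym_xform *cipher_xform = NULL;
 *     const struct rte_crypto_sym_xform *aead_xform = NULL;
 *     int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
 *                     &cipher_xform, &aead_xform);
 *
 *     if (ret != 0)
 *             return ret;
 */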

/** Device creation function */
int
ipsec_mb_create(struct rte_vdev_device *vdev,
        enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
int
ipsec_mb_remove(struct rte_vdev_device *vdev);
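
/*
 * Usage sketch (illustrative only): each PMD wraps these in its vdev
 * probe/remove callbacks; the wrapper names below are hypothetical:
 *
 *     static int
 *     example_mb_probe(struct rte_vdev_device *vdev)
 *     {
 *             return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB);
 *     }
 *
 *     static struct rte_vdev_driver example_mb_drv = {
 *             .probe = example_mb_probe,
 *             .remove = ipsec_mb_remove
 *     };
 */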

/** Configure queue pair PMD type specific data */
typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);

/** Configure session PMD type specific data */
typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mb_mgr,
                void *session_private,
                const struct rte_crypto_sym_xform *xform);

/** Configure internals PMD type specific data */
typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);

/** Per PMD type operation and data */
struct ipsec_mb_internals {
        uint8_t is_configured;
        dequeue_pkt_burst_t dequeue_burst;
        ipsec_mb_dev_configure_t dev_config;
        ipsec_mb_queue_pair_configure_t queue_pair_configure;
        ipsec_mb_session_configure_t session_configure;
        const struct rte_cryptodev_capabilities *caps;
        struct rte_cryptodev_ops *ops;
        struct rte_security_ops *security_ops;
        uint64_t feature_flags;
        uint32_t session_priv_size;
        uint32_t qp_priv_size;
        uint32_t internals_priv_size;
};

/** Global PMD type specific data */
extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
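
/*
 * Usage sketch (illustrative only): each PMD fills its slot in
 * ipsec_mb_pmds[] from a constructor; all "example_" names below are
 * hypothetical:
 *
 *     RTE_INIT(ipsec_mb_register_example)
 *     {
 *             struct ipsec_mb_internals *pmd_data =
 *                     &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];
 *
 *             pmd_data->caps = example_capabilities;
 *             pmd_data->dequeue_burst = example_dequeue_burst;
 *             pmd_data->session_configure = example_session_configure;
 *             pmd_data->session_priv_size = sizeof(struct example_session);
 *     }
 */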

int
ipsec_mb_config(struct rte_cryptodev *dev,
        struct rte_cryptodev_config *config);

int
ipsec_mb_start(struct rte_cryptodev *dev);

void
ipsec_mb_stop(struct rte_cryptodev *dev);

int
ipsec_mb_close(struct rte_cryptodev *dev);

void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_stats *stats);

void
ipsec_mb_stats_reset(struct rte_cryptodev *dev);

void
ipsec_mb_info_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_info *dev_info);

int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                                 const struct rte_cryptodev_qp_conf *qp_conf,
                                 int socket_id);

/** Returns the size of the aesni multi-buffer session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure an aesni multi-buffer session from a crypto xform chain */
int ipsec_mb_sym_session_configure(
        struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess,
        struct rte_mempool *mempool);

/** Clear the session memory so it does not leave key material behind */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
                                struct rte_cryptodev_sym_session *sess);

/** Get session from op. If sessionless, create a temporary session */
static __rte_always_inline void *
ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
{
        void *sess = NULL;
        uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
        struct rte_crypto_sym_op *sym_op = op->sym;
        uint8_t sess_type = op->sess_type;
        void *_sess;
        void *_sess_private_data = NULL;
        struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];

        switch (sess_type) {
        case RTE_CRYPTO_OP_WITH_SESSION:
                if (likely(sym_op->session != NULL))
                        sess = get_sym_session_private_data(sym_op->session,
                                                            driver_id);
                break;
        case RTE_CRYPTO_OP_SESSIONLESS:
                if (!qp->sess_mp ||
                    rte_mempool_get(qp->sess_mp, (void **)&_sess))
                        return NULL;

                if (!qp->sess_mp_priv ||
                    rte_mempool_get(qp->sess_mp_priv,
                                        (void **)&_sess_private_data)) {
                        /* Return the session object taken above */
                        rte_mempool_put(qp->sess_mp, _sess);
                        return NULL;
                }

                sess = _sess_private_data;
                if (unlikely(pmd_data->session_configure(qp->mb_mgr,
                                sess, sym_op->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
                        sess = NULL;
                }

                sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
                set_sym_session_private_data(sym_op->session, driver_id,
                                             _sess_private_data);
                break;
        default:
                IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
        }

        if (unlikely(sess == NULL))
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

        return sess;
}
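
/*
 * Usage sketch (illustrative only): a processing loop resolves the session
 * for each op before building its multi-buffer job, and skips ops whose
 * session cannot be resolved (the op status is already set above):
 *
 *     void *sess = ipsec_mb_get_session_private(qp, op);
 *
 *     if (unlikely(sess == NULL)) {
 *             qp->stats.dequeue_err_count++;
 *             continue;
 *     }
 */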

#endif /* _IPSEC_MB_PRIVATE_H_ */