dpdk.git: drivers/crypto/ipsec_mb/ipsec_mb_private.h (9ee23d8fb5a0b7a47bc94f62610b8278ff32238e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#ifndef _IPSEC_MB_PRIVATE_H_
#define _IPSEC_MB_PRIVATE_H_

#include <intel-ipsec-mb.h>
#include <cryptodev_pmd.h>
#include <rte_bus_vdev.h>

#if defined(RTE_LIB_SECURITY)
#define IPSEC_MB_DOCSIS_SEC_ENABLED 1
#include <rte_security.h>
#include <rte_security_driver.h>
#endif

/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64

/* Maximum length for memzone name */
#define IPSEC_MB_MAX_MZ_NAME 32

enum ipsec_mb_vector_mode {
	IPSEC_MB_NOT_SUPPORTED = 0,
	IPSEC_MB_SSE,
	IPSEC_MB_AVX,
	IPSEC_MB_AVX2,
	IPSEC_MB_AVX512
};

extern enum ipsec_mb_vector_mode vector_mode;

/** IMB_MGR instances, one per thread */
extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< IPSEC Multi buffer aesni_mb PMD device name */

#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< IPSEC Multi buffer PMD aesni_gcm device name */

#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
/**< IPSEC Multi buffer PMD kasumi device name */

#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
/**< IPSEC Multi buffer PMD snow3g device name */

/** PMD LOGTYPE DRIVER, common to all PMDs */
extern int ipsec_mb_logtype_driver;
#define IPSEC_MB_LOG(level, fmt, ...)                                         \
	rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
		"%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
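
/*
 * A minimal usage sketch (illustrative only; the variables shown are
 * assumptions, not part of this header): the macro expands to rte_log()
 * with the calling function name and line number prepended, so callers
 * pass only a log level token and a format string.
 *
 *	IPSEC_MB_LOG(ERR, "Unsupported digest size %u", digest_len);
 *	IPSEC_MB_LOG(INFO, "Queue pair %u initialized", qp_id);
 */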
/** All supported device types */
enum ipsec_mb_pmd_types {
	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
	IPSEC_MB_PMD_TYPE_AESNI_GCM,
	IPSEC_MB_PMD_TYPE_KASUMI,
	IPSEC_MB_PMD_TYPE_SNOW3G,
	IPSEC_MB_N_PMD_TYPES
};

/** Crypto operations */
enum ipsec_mb_operation {
	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
	IPSEC_MB_OP_ENCRYPT_ONLY,
	IPSEC_MB_OP_DECRYPT_ONLY,
	IPSEC_MB_OP_HASH_GEN_ONLY,
	IPSEC_MB_OP_HASH_VERIFY_ONLY,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
	IPSEC_MB_OP_NOT_SUPPORTED
};

extern uint8_t pmd_driver_id_aesni_mb;
extern uint8_t pmd_driver_id_aesni_gcm;
extern uint8_t pmd_driver_id_kasumi;
extern uint8_t pmd_driver_id_snow3g;

/** Helper function. Gets driver ID based on PMD type */
static __rte_always_inline uint8_t
ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
{
	switch (pmd_type) {
	case IPSEC_MB_PMD_TYPE_AESNI_MB:
		return pmd_driver_id_aesni_mb;
	case IPSEC_MB_PMD_TYPE_AESNI_GCM:
		return pmd_driver_id_aesni_gcm;
	case IPSEC_MB_PMD_TYPE_KASUMI:
		return pmd_driver_id_kasumi;
	case IPSEC_MB_PMD_TYPE_SNOW3G:
		return pmd_driver_id_snow3g;
	default:
		break;
	}
	return UINT8_MAX;
}

/** Common private data structure for each PMD */
struct ipsec_mb_dev_private {
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD type */
	uint32_t max_nb_queue_pairs;
	/**< Max number of queue pairs supported by device */
	__extension__ uint8_t priv[0];
	/**< PMD specific private data */
};

/** IPSEC Multi buffer queue pair data, common to all PMDs */
struct ipsec_mb_qp {
	uint16_t id;
	/**< Queue Pair Identifier */
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	/**< Unique Queue Pair Name */
	struct rte_ring *ingress_queue;
	/**< Ring for placing operations ready for processing */
	struct rte_mempool *sess_mp;
	/**< Session Mempool */
	struct rte_mempool *sess_mp_priv;
	/**< Session Private Data Mempool */
	struct rte_cryptodev_stats stats;
	/**< Queue pair statistics */
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD type */
	uint8_t digest_idx;
	/**< Index of the next slot to be used in temp_digests,
	 * to store the digest for a given operation
	 */
	IMB_MGR *mb_mgr;
	/**< Multi buffer manager */
	const struct rte_memzone *mb_mgr_mz;
	/**< Shared memzone for storing mb_mgr */
	__extension__ uint8_t additional_data[0];
	/**< Storing PMD specific additional data */
};

/** Helper function. Gets pointer to the PMD specific queue pair data */
static __rte_always_inline void *
ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
{
	return (void *)qp->additional_data;
}
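
/*
 * A minimal usage sketch (illustrative; struct foo_qp_data and its field
 * are assumptions, not defined in this header): a PMD lays out its own
 * per-queue-pair state in additional_data, sized via qp_priv_size, and
 * retrieves it on the datapath with the helper above.
 *
 *	struct foo_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
 *
 *	uint8_t *digest = qp_data->temp_digests[qp->digest_idx];
 */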

/** Helper function. Allocates job manager */
static __rte_always_inline IMB_MGR *
alloc_init_mb_mgr(void)
{
	IMB_MGR *mb_mgr = alloc_mb_mgr(0);

	if (unlikely(mb_mgr == NULL)) {
		IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data");
		return NULL;
	}

	init_mb_mgr_auto(mb_mgr, NULL);

	return mb_mgr;
}

/** Helper function. Gets per thread job manager */
static __rte_always_inline IMB_MGR *
get_per_thread_mb_mgr(void)
{
	if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
		RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();

	return RTE_PER_LCORE(mb_mgr);
}
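
/*
 * A minimal usage sketch (illustrative only): datapath code fetches the
 * lcore-local IMB_MGR lazily on first use, so no explicit per-thread
 * initialisation is required before enqueue/dequeue.
 *
 *	IMB_MGR *mb_mgr = get_per_thread_mb_mgr();
 *	if (unlikely(mb_mgr == NULL))
 *		return 0;
 *
 *	IMB_JOB *job = IMB_GET_NEXT_JOB(mb_mgr);
 */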

/** Helper function. Gets mode and chained xforms from the xform */
static __rte_always_inline int
ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
			enum ipsec_mb_operation *mode,
			const struct rte_crypto_sym_xform **auth_xform,
			const struct rte_crypto_sym_xform **cipher_xform,
			const struct rte_crypto_sym_xform **aead_xform)
{
	const struct rte_crypto_sym_xform *next;

	if (xform == NULL) {
		*mode = IPSEC_MB_OP_NOT_SUPPORTED;
		return -ENOTSUP;
	}

	next = xform->next;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (next == NULL) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
				*mode = IPSEC_MB_OP_ENCRYPT_ONLY;
				*cipher_xform = xform;
				*auth_xform = NULL;
				return 0;
			}
			*mode = IPSEC_MB_OP_DECRYPT_ONLY;
			*cipher_xform = xform;
			*auth_xform = NULL;
			return 0;
		}

		if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
				*mode = IPSEC_MB_OP_NOT_SUPPORTED;
				return -ENOTSUP;
			}

			*mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
			*cipher_xform = xform;
			*auth_xform = xform->next;
			return 0;
		}
		if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		*mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
		*cipher_xform = xform;
		*auth_xform = xform->next;
		return 0;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (next == NULL) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
				*mode = IPSEC_MB_OP_HASH_GEN_ONLY;
				*auth_xform = xform;
				*cipher_xform = NULL;
				return 0;
			}
			*mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
			*auth_xform = xform;
			*cipher_xform = NULL;
			return 0;
		}

		if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
				*mode = IPSEC_MB_OP_NOT_SUPPORTED;
				return -ENOTSUP;
			}

			*mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
			*auth_xform = xform;
			*cipher_xform = xform->next;
			return 0;
		}
		if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
			return -ENOTSUP;
		}

		*mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
		*auth_xform = xform;
		*cipher_xform = xform->next;
		return 0;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			/*
			 * CCM requires hashing first and ciphering later
			 * when encrypting
			 */
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
				*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
				*aead_xform = xform;
				return 0;
			}
			*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
			*aead_xform = xform;
			return 0;
		}
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
			*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
			*aead_xform = xform;
			return 0;
		}
		*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
		*aead_xform = xform;
		return 0;
	}

	*mode = IPSEC_MB_OP_NOT_SUPPORTED;
	return -ENOTSUP;
}
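
/*
 * A minimal usage sketch (illustrative; foo_session_configure and the
 * private session layout are assumptions): a PMD's session_configure
 * callback typically starts by classifying the xform chain with
 * ipsec_mb_parse_xform() and then programs keys based on the mode.
 *
 *	static int
 *	foo_session_configure(IMB_MGR *mb_mgr, void *priv_sess,
 *			const struct rte_crypto_sym_xform *xform)
 *	{
 *		const struct rte_crypto_sym_xform *auth_xform = NULL;
 *		const struct rte_crypto_sym_xform *cipher_xform = NULL;
 *		const struct rte_crypto_sym_xform *aead_xform = NULL;
 *		enum ipsec_mb_operation mode;
 *		int ret;
 *
 *		ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
 *				&cipher_xform, &aead_xform);
 *		if (ret)
 *			return ret;
 *
 *		(program keys into priv_sess according to mode)
 *		return 0;
 *	}
 */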

/** Device creation function */
int
ipsec_mb_create(struct rte_vdev_device *vdev,
	enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
int
ipsec_mb_remove(struct rte_vdev_device *vdev);
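
/*
 * A minimal registration sketch (illustrative; the crypto_foo names are
 * assumptions): a PMD built on this framework registers a vdev driver
 * whose probe callback forwards to ipsec_mb_create() with its own PMD
 * type and whose remove callback is ipsec_mb_remove().
 *
 *	static int
 *	crypto_foo_probe(struct rte_vdev_device *vdev)
 *	{
 *		return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB);
 *	}
 *
 *	static struct rte_vdev_driver cryptodev_foo_pmd_drv = {
 *		.probe = crypto_foo_probe,
 *		.remove = ipsec_mb_remove
 *	};
 *
 *	RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD,
 *		cryptodev_foo_pmd_drv);
 */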

/** Configure queue pair PMD type specific data */
typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);

/** Configure session PMD type specific data */
typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mb_mgr,
		void *session_private,
		const struct rte_crypto_sym_xform *xform);

/** Configure internals PMD type specific data */
typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);

/** Per PMD type operation and data */
struct ipsec_mb_internals {
	uint8_t is_configured;
	dequeue_pkt_burst_t dequeue_burst;
	ipsec_mb_dev_configure_t dev_config;
	ipsec_mb_queue_pair_configure_t queue_pair_configure;
	ipsec_mb_session_configure_t session_configure;
	const struct rte_cryptodev_capabilities *caps;
	struct rte_cryptodev_ops *ops;
	struct rte_security_ops *security_ops;
	uint64_t feature_flags;
	uint32_t session_priv_size;
	uint32_t qp_priv_size;
	uint32_t internals_priv_size;
};

/** Global PMD type specific data */
extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
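
/*
 * A minimal initialisation sketch (illustrative; the foo_* symbols are
 * assumptions): at constructor time each PMD fills in its slot of
 * ipsec_mb_pmds[] with its burst function, ops, capabilities and the
 * sizes of its private structures.
 *
 *	RTE_INIT(ipsec_mb_register_foo)
 *	{
 *		struct ipsec_mb_internals *pmd_data =
 *			&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];
 *
 *		pmd_data->caps = foo_capabilities;
 *		pmd_data->dequeue_burst = foo_dequeue_burst;
 *		pmd_data->ops = &foo_pmd_ops;
 *		pmd_data->qp_priv_size = sizeof(struct foo_qp_data);
 *		pmd_data->session_priv_size = sizeof(struct foo_session);
 *		pmd_data->session_configure = foo_session_configure;
 *		pmd_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO;
 *	}
 */
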
int
ipsec_mb_config(struct rte_cryptodev *dev,
	struct rte_cryptodev_config *config);

int
ipsec_mb_start(struct rte_cryptodev *dev);

void
ipsec_mb_stop(struct rte_cryptodev *dev);

int
ipsec_mb_close(struct rte_cryptodev *dev);

void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats);

void
ipsec_mb_stats_reset(struct rte_cryptodev *dev);

void
ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);

int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				 const struct rte_cryptodev_qp_conf *qp_conf,
				 int socket_id);

/** Returns the size of the aesni multi-buffer session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure an aesni multi-buffer session from a crypto xform chain */
int ipsec_mb_sym_session_configure(
	struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool);

/** Clear the memory of session so it does not leave key material behind */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
				struct rte_cryptodev_sym_session *sess);

/** Get session from op. If sessionless create a session */
static __rte_always_inline void *
ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
{
	void *sess = NULL;
	uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t sess_type = op->sess_type;
	void *_sess;
	void *_sess_private_data = NULL;
	struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];

	switch (sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		if (likely(sym_op->session != NULL))
			sess = get_sym_session_private_data(sym_op->session,
							    driver_id);
		break;
	case RTE_CRYPTO_OP_SESSIONLESS:
		if (!qp->sess_mp ||
		    rte_mempool_get(qp->sess_mp, (void **)&_sess))
			return NULL;

		if (!qp->sess_mp_priv ||
		    rte_mempool_get(qp->sess_mp_priv,
					(void **)&_sess_private_data)) {
			/* Return the session object taken above */
			rte_mempool_put(qp->sess_mp, _sess);
			return NULL;
		}

		sess = _sess_private_data;
		if (unlikely(pmd_data->session_configure(qp->mb_mgr,
				sess, sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
			sess = NULL;
		}

		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(sym_op->session, driver_id,
					     _sess_private_data);
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
	}

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

	return sess;
}
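
/*
 * A minimal usage sketch (illustrative; struct foo_session and the job
 * submission step are assumptions): a PMD burst function resolves the
 * per-op session before building IMB jobs, relying on the
 * INVALID_SESSION status set above for the error path.
 *
 *	struct foo_session *sess = ipsec_mb_get_session_private(qp, op);
 *	if (unlikely(sess == NULL)) {
 *		qp->stats.dequeue_err_count++;
 *		continue;
 *	}
 *	(build and submit the IMB job for op using sess)
 */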

#endif /* _IPSEC_MB_PRIVATE_H_ */