/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */
5 #ifndef _IPSEC_MB_PRIVATE_H_
6 #define _IPSEC_MB_PRIVATE_H_
8 #include <intel-ipsec-mb.h>
9 #include <cryptodev_pmd.h>
10 #include <rte_bus_vdev.h>
12 #if defined(RTE_LIB_SECURITY)
13 #define IPSEC_MB_DOCSIS_SEC_ENABLED 1
14 #include <rte_security.h>
15 #include <rte_security_driver.h>
18 /* Maximum length for digest */
19 #define DIGEST_LENGTH_MAX 64
21 /* Maximum length for memzone name */
22 #define IPSEC_MB_MAX_MZ_NAME 32
/** SIMD vector modes the IPsec-MB library can run with */
enum ipsec_mb_vector_mode {
	IPSEC_MB_NOT_SUPPORTED = 0,
	IPSEC_MB_SSE,
	IPSEC_MB_AVX,
	IPSEC_MB_AVX2,
	IPSEC_MB_AVX512
};
32 extern enum ipsec_mb_vector_mode vector_mode;
34 /** IMB_MGR instances, one per thread */
35 extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
37 #define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
38 /**< IPSEC Multi buffer aesni_mb PMD device name */
40 #define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
41 /**< IPSEC Multi buffer PMD aesni_gcm device name */
43 #define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
44 /**< IPSEC Multi buffer PMD kasumi device name */
46 #define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
47 /**< IPSEC Multi buffer PMD snow3g device name */
49 /** PMD LOGTYPE DRIVER, common to all PMDs */
50 extern int ipsec_mb_logtype_driver;
51 #define IPSEC_MB_LOG(level, fmt, ...) \
52 rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver, \
53 "%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
/** All supported device types */
enum ipsec_mb_pmd_types {
	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
	IPSEC_MB_PMD_TYPE_AESNI_GCM,
	IPSEC_MB_PMD_TYPE_KASUMI,
	IPSEC_MB_PMD_TYPE_SNOW3G,
	IPSEC_MB_N_PMD_TYPES
	/**< Number of PMD types; used to size the ipsec_mb_pmds[] table */
};
/** Crypto operations (ordering of cipher vs. auth, or AEAD) */
enum ipsec_mb_operation {
	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
	IPSEC_MB_OP_ENCRYPT_ONLY,
	IPSEC_MB_OP_DECRYPT_ONLY,
	IPSEC_MB_OP_HASH_GEN_ONLY,
	IPSEC_MB_OP_HASH_VERIFY_ONLY,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
	IPSEC_MB_OP_NOT_SUPPORTED
};
79 extern uint8_t pmd_driver_id_aesni_mb;
80 extern uint8_t pmd_driver_id_aesni_gcm;
81 extern uint8_t pmd_driver_id_kasumi;
82 extern uint8_t pmd_driver_id_snow3g;
84 /** Helper function. Gets driver ID based on PMD type */
85 static __rte_always_inline uint8_t
86 ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
89 case IPSEC_MB_PMD_TYPE_AESNI_MB:
90 return pmd_driver_id_aesni_mb;
91 case IPSEC_MB_PMD_TYPE_AESNI_GCM:
92 return pmd_driver_id_aesni_gcm;
93 case IPSEC_MB_PMD_TYPE_KASUMI:
94 return pmd_driver_id_kasumi;
95 case IPSEC_MB_PMD_TYPE_SNOW3G:
96 return pmd_driver_id_snow3g;
103 /** Common private data structure for each PMD */
104 struct ipsec_mb_dev_private {
105 enum ipsec_mb_pmd_types pmd_type;
107 uint32_t max_nb_queue_pairs;
108 /**< Max number of queue pairs supported by device */
109 __extension__ uint8_t priv[0];
112 /** IPSEC Multi buffer queue pair common queue pair data for all PMDs */
115 /**< Queue Pair Identifier */
116 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
117 struct rte_ring *ingress_queue;
118 /**< Ring for placing operations ready for processing */
119 struct rte_mempool *sess_mp;
120 /**< Session Mempool */
121 struct rte_mempool *sess_mp_priv;
122 /**< Session Private Data Mempool */
123 struct rte_cryptodev_stats stats;
124 /**< Queue pair statistics */
125 enum ipsec_mb_pmd_types pmd_type;
128 /**< Index of the next
129 * slot to be used in temp_digests,
130 * to store the digest for a given operation
133 /* Multi buffer manager */
134 const struct rte_memzone *mb_mgr_mz;
135 /* Shared memzone for storing mb_mgr */
136 __extension__ uint8_t additional_data[0];
137 /**< Storing PMD specific additional data */
140 static __rte_always_inline void *
141 ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
143 return (void *)qp->additional_data;
146 /** Helper function. Allocates job manager */
147 static __rte_always_inline IMB_MGR *
148 alloc_init_mb_mgr(void)
150 IMB_MGR *mb_mgr = alloc_mb_mgr(0);
152 if (unlikely(mb_mgr == NULL)) {
153 IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data\n");
157 init_mb_mgr_auto(mb_mgr, NULL);
162 /** Helper function. Gets per thread job manager */
163 static __rte_always_inline IMB_MGR *
164 get_per_thread_mb_mgr(void)
166 if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
167 RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();
169 return RTE_PER_LCORE(mb_mgr);
172 /** Helper function. Gets mode and chained xforms from the xform */
173 static __rte_always_inline int
174 ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
175 enum ipsec_mb_operation *mode,
176 const struct rte_crypto_sym_xform **auth_xform,
177 const struct rte_crypto_sym_xform **cipher_xform,
178 const struct rte_crypto_sym_xform **aead_xform)
180 const struct rte_crypto_sym_xform *next = xform->next;
183 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
187 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
189 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
190 *mode = IPSEC_MB_OP_ENCRYPT_ONLY;
191 *cipher_xform = xform;
195 *mode = IPSEC_MB_OP_DECRYPT_ONLY;
196 *cipher_xform = xform;
201 if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
202 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
206 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
207 if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
208 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
212 *mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
213 *cipher_xform = xform;
214 *auth_xform = xform->next;
217 if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
218 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
222 *mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
223 *cipher_xform = xform;
224 *auth_xform = xform->next;
228 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
230 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
231 *mode = IPSEC_MB_OP_HASH_GEN_ONLY;
233 *cipher_xform = NULL;
236 *mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
238 *cipher_xform = NULL;
242 if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
243 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
247 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
248 if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
249 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
253 *mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
255 *cipher_xform = xform->next;
258 if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
259 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
263 *mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
265 *cipher_xform = xform->next;
269 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
270 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
272 * CCM requires to hash first and cipher later
275 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
276 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
281 IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
286 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
287 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
291 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
297 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
/** Device creation function */
int
ipsec_mb_create(struct rte_vdev_device *vdev,
	enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
int
ipsec_mb_remove(struct rte_vdev_device *vdev);
310 /** Configure queue pair PMD type specific data */
311 typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);
313 /** Configure session PMD type specific data */
314 typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mbr_mgr,
315 void *session_private,
316 const struct rte_crypto_sym_xform *xform);
318 /** Configure internals PMD type specific data */
319 typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);
321 /** Per PMD type operation and data */
322 struct ipsec_mb_internals {
323 uint8_t is_configured;
324 dequeue_pkt_burst_t dequeue_burst;
325 ipsec_mb_dev_configure_t dev_config;
326 ipsec_mb_queue_pair_configure_t queue_pair_configure;
327 ipsec_mb_session_configure_t session_configure;
328 const struct rte_cryptodev_capabilities *caps;
329 struct rte_cryptodev_ops *ops;
330 struct rte_security_ops *security_ops;
331 uint64_t feature_flags;
332 uint32_t session_priv_size;
333 uint32_t qp_priv_size;
334 uint32_t internals_priv_size;
337 /** Global PMD type specific data */
338 extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
/** Configure device */
int
ipsec_mb_config(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config);

/** Start device */
int
ipsec_mb_start(struct rte_cryptodev *dev);

/** Stop device */
void
ipsec_mb_stop(struct rte_cryptodev *dev);

/** Close device */
int
ipsec_mb_close(struct rte_cryptodev *dev);

/** Get device statistics */
void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats);

/** Reset device statistics */
void
ipsec_mb_stats_reset(struct rte_cryptodev *dev);

/** Get device info */
void
ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);

/** Release queue pair */
int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

/** Set a unique name for the queue pair */
int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

/** Setup a queue pair */
int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id);

/** Returns the size of the aesni multi-buffer session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure an aesni multi-buffer session from a crypto xform chain */
int ipsec_mb_sym_session_configure(
	struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool);

/** Clear the memory of session so it does not leave key material behind */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess);
391 /** Get session from op. If sessionless create a session */
392 static __rte_always_inline void *
393 ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
396 uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
397 struct rte_crypto_sym_op *sym_op = op->sym;
398 uint8_t sess_type = op->sess_type;
400 void *_sess_private_data = NULL;
401 struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];
404 case RTE_CRYPTO_OP_WITH_SESSION:
405 if (likely(sym_op->session != NULL))
406 sess = get_sym_session_private_data(sym_op->session,
409 case RTE_CRYPTO_OP_SESSIONLESS:
411 rte_mempool_get(qp->sess_mp, (void **)&_sess))
414 if (!qp->sess_mp_priv ||
415 rte_mempool_get(qp->sess_mp_priv,
416 (void **)&_sess_private_data))
419 sess = _sess_private_data;
420 if (unlikely(pmd_data->session_configure(qp->mb_mgr,
421 sess, sym_op->xform) != 0)) {
422 rte_mempool_put(qp->sess_mp, _sess);
423 rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
427 sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
428 set_sym_session_private_data(sym_op->session, driver_id,
432 IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
435 if (unlikely(sess == NULL))
436 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
441 #endif /* _IPSEC_MB_PRIVATE_H_ */