1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 Intel Corporation
5 #ifndef _IPSEC_MB_PRIVATE_H_
6 #define _IPSEC_MB_PRIVATE_H_
8 #include <intel-ipsec-mb.h>
9 #include <cryptodev_pmd.h>
10 #include <rte_bus_vdev.h>
12 #if defined(RTE_LIB_SECURITY)
13 #define IPSEC_MB_DOCSIS_SEC_ENABLED 1
14 #include <rte_security.h>
15 #include <rte_security_driver.h>
18 /* Maximum length for digest */
19 #define DIGEST_LENGTH_MAX 64
21 /* Maximum length for memzone name */
22 #define IPSEC_MB_MAX_MZ_NAME 32
/* Vector-width dispatch mode used to pick the IMB code path.
 * NOTE(review): only the first enumerator is visible here; the remaining
 * members (presumably SSE/AVX/AVX2/AVX512 variants) and the closing brace
 * (original lines 26-31) are missing from this listing — confirm against
 * the upstream file before relying on this definition.
 */
24 enum ipsec_mb_vector_mode {
25 IPSEC_MB_NOT_SUPPORTED = 0,
/* Globally selected vector mode, shared by all ipsec_mb PMDs (defined in
 * exactly one .c file). */
32 extern enum ipsec_mb_vector_mode vector_mode;
34 /** IMB_MGR instances, one per thread */
35 extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
37 #define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
38 /**< IPSEC Multi buffer aesni_mb PMD device name */
40 #define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
41 /**< IPSEC Multi buffer PMD aesni_gcm device name */
43 #define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
44 /**< IPSEC Multi buffer PMD kasumi device name */
46 /** PMD LOGTYPE DRIVER, common to all PMDs */
47 extern int ipsec_mb_logtype_driver;
48 #define IPSEC_MB_LOG(level, fmt, ...) \
49 rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver, \
50 "%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
52 /** All supported device types */
/* Index into the global ipsec_mb_pmds[] table and key for driver-id lookup.
 * NOTE(review): the tail of this enum — additional PMD types and the
 * IPSEC_MB_N_PMD_TYPES terminator plus closing brace (original lines
 * 57-58) — is missing from this listing; IPSEC_MB_N_PMD_TYPES is
 * referenced later as the ipsec_mb_pmds[] array bound. Confirm upstream.
 */
53 enum ipsec_mb_pmd_types {
54 IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
55 IPSEC_MB_PMD_TYPE_AESNI_GCM,
56 IPSEC_MB_PMD_TYPE_KASUMI,
60 /** Crypto operations */
/* Normalized view of a symmetric xform (chain), produced by
 * ipsec_mb_parse_xform() below: single-op, chained cipher+auth in either
 * order, AEAD, or unsupported.
 * NOTE(review): the closing "};" (original line 73) is missing from this
 * listing.
 */
61 enum ipsec_mb_operation {
62 IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
63 IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
64 IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
65 IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
66 IPSEC_MB_OP_ENCRYPT_ONLY,
67 IPSEC_MB_OP_DECRYPT_ONLY,
68 IPSEC_MB_OP_HASH_GEN_ONLY,
69 IPSEC_MB_OP_HASH_VERIFY_ONLY,
70 IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
71 IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
72 IPSEC_MB_OP_NOT_SUPPORTED
75 extern uint8_t pmd_driver_id_aesni_mb;
76 extern uint8_t pmd_driver_id_aesni_gcm;
77 extern uint8_t pmd_driver_id_kasumi;
79 /** Helper function. Gets driver ID based on PMD type */
/* Maps an ipsec_mb_pmd_types value to the cryptodev driver id registered
 * for that PMD (the pmd_driver_id_* externs above).
 * NOTE(review): the function's opening brace and "switch (pmd_type) {"
 * header (original lines 82-83) as well as the default case / closing
 * braces (original lines 90-95) are missing from this listing; as shown
 * the body is not compilable. Restore from the upstream file.
 */
80 static __rte_always_inline uint8_t
81 ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
84 case IPSEC_MB_PMD_TYPE_AESNI_MB:
85 return pmd_driver_id_aesni_mb;
86 case IPSEC_MB_PMD_TYPE_AESNI_GCM:
87 return pmd_driver_id_aesni_gcm;
88 case IPSEC_MB_PMD_TYPE_KASUMI:
89 return pmd_driver_id_kasumi;
96 /** Common private data structure for each PMD */
/* Stored in rte_cryptodev->data->dev_private; PMD-specific state follows
 * the fixed fields in the trailing priv[] area (sized via
 * internals_priv_size in struct ipsec_mb_internals).
 * NOTE(review): original line 99 (likely the pmd_type field comment) and
 * the closing "};" (original line 103) are missing from this listing.
 * priv[0] is a GNU zero-length array kept for ABI compatibility — do not
 * "modernize" to a C99 flexible array member without checking offsets.
 */
97 struct ipsec_mb_dev_private {
98 enum ipsec_mb_pmd_types pmd_type;
100 uint32_t max_nb_queue_pairs;
101 /**< Max number of queue pairs supported by device */
102 __extension__ uint8_t priv[0];
105 /** IPSEC Multi buffer queue pair common queue pair data for all PMDs */
/* NOTE(review): the "struct ipsec_mb_qp {" opener and the qp id field
 * (original lines 106-107) are missing from this listing, as are the
 * digest-index / temp-digest fields around original lines 119-125 and the
 * closing "};" (original ~line 131). The comments below are keyed to the
 * fields that are visible. Restore the full struct from upstream.
 */
108 /**< Queue Pair Identifier */
109 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
110 struct rte_ring *ingress_queue;
111 /**< Ring for placing operations ready for processing */
112 struct rte_mempool *sess_mp;
113 /**< Session Mempool */
114 struct rte_mempool *sess_mp_priv;
115 /**< Session Private Data Mempool */
116 struct rte_cryptodev_stats stats;
117 /**< Queue pair statistics */
118 enum ipsec_mb_pmd_types pmd_type;
121 /**< Index of the next
122 * slot to be used in temp_digests,
123 * to store the digest for a given operation
126 /* Multi buffer manager */
127 const struct rte_memzone *mb_mgr_mz;
128 /* Shared memzone for storing mb_mgr */
129 __extension__ uint8_t additional_data[0];
130 /**< Storing PMD specific additional data */
/* Returns a pointer to the PMD-specific area that trails the common
 * queue-pair struct (additional_data[], sized by qp_priv_size).
 * NOTE(review): the function's opening and closing braces (original lines
 * 135 and 137) are missing from this listing.
 */
133 static __rte_always_inline void *
134 ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
136 return (void *)qp->additional_data;
139 /** Helper function. Allocates job manager */
/* Allocates an intel-ipsec-mb IMB_MGR (alloc_mb_mgr(0)) and initializes
 * it with the best available arch via init_mb_mgr_auto(). Caller owns the
 * returned manager.
 * NOTE(review): the opening brace, the "return NULL;" in the error branch
 * (original lines ~147-148), the success "return mb_mgr;" and the closing
 * brace (original lines ~151-153) are missing from this listing — the
 * error path presumably returns NULL; confirm upstream.
 */
140 static __rte_always_inline IMB_MGR *
141 alloc_init_mb_mgr(void)
143 IMB_MGR *mb_mgr = alloc_mb_mgr(0);
145 if (unlikely(mb_mgr == NULL)) {
146 IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data\n");
150 init_mb_mgr_auto(mb_mgr, NULL);
155 /** Helper function. Gets per thread job manager */
/* Lazily allocates the per-lcore IMB_MGR on first use and caches it in
 * the RTE_PER_LCORE(mb_mgr) TLS slot. May return NULL if allocation in
 * alloc_init_mb_mgr() failed — callers must check.
 * NOTE(review): the function's opening and closing braces (original lines
 * 158 and 163) are missing from this listing.
 */
156 static __rte_always_inline IMB_MGR *
157 get_per_thread_mb_mgr(void)
159 if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
160 RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();
162 return RTE_PER_LCORE(mb_mgr);
165 /** Helper function. Gets mode and chained xforms from the xform */
/* Classifies a symmetric xform chain (at most two links) into an
 * ipsec_mb_operation and hands back the individual cipher/auth/aead
 * xform pointers. Only CIPHER->AUTH, AUTH->CIPHER and single CIPHER /
 * AUTH / AEAD chains are accepted; anything else yields
 * IPSEC_MB_OP_NOT_SUPPORTED.
 * NOTE(review): this listing drops many interior lines — the function's
 * braces, the "return 0;" / "return -ENOTSUP;" statements after each
 * classification, several "if (next == NULL)" guards, and the else/brace
 * lines between branches (gaps in the original numbering, e.g. lines
 * 172, 174-179, 185-187, 190-193, etc.). The visible assignments show
 * the intended classification only; restore control flow from upstream.
 */
166 static __rte_always_inline int
167 ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
168 enum ipsec_mb_operation *mode,
169 const struct rte_crypto_sym_xform **auth_xform,
170 const struct rte_crypto_sym_xform **cipher_xform,
171 const struct rte_crypto_sym_xform **aead_xform)
173 const struct rte_crypto_sym_xform *next = xform->next;
176 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
/* --- Chain starting with a cipher xform --- */
180 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
182 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
183 *mode = IPSEC_MB_OP_ENCRYPT_ONLY;
184 *cipher_xform = xform;
188 *mode = IPSEC_MB_OP_DECRYPT_ONLY;
189 *cipher_xform = xform;
194 if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
195 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
/* Cipher followed by auth: direction of both links must agree
 * (encrypt pairs with generate, decrypt with verify). */
199 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
200 if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
201 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
205 *mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
206 *cipher_xform = xform;
207 *auth_xform = xform->next;
210 if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
211 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
215 *mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
216 *cipher_xform = xform;
217 *auth_xform = xform->next;
/* --- Chain starting with an auth xform --- */
221 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
223 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
224 *mode = IPSEC_MB_OP_HASH_GEN_ONLY;
226 *cipher_xform = NULL;
229 *mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
231 *cipher_xform = NULL;
235 if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
236 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
240 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
241 if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
242 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
246 *mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
248 *cipher_xform = xform->next;
251 if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
252 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
256 *mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
258 *cipher_xform = xform->next;
/* --- Single AEAD xform --- */
262 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
263 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
/* CCM is deliberately cross-mapped: the IMB library processes
 * CCM hash-first on encrypt, so encrypt maps to the
 * AUTHENTICATED_DECRYPT ordering and vice versa (see comment
 * at original line 265). */
265 * CCM requires to hash first and cipher later
268 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
269 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
274 IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
279 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
280 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
284 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
/* Fallthrough: unrecognized xform type. */
290 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
294 /** Device creation function */
296 ipsec_mb_create(struct rte_vdev_device *vdev,
297 enum ipsec_mb_pmd_types pmd_type);
299 /** Device remove function */
301 ipsec_mb_remove(struct rte_vdev_device *vdev);
303 /** Configure queue pair PMD type specific data */
304 typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);
306 /** Configure session PMD type specific data */
307 typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mbr_mgr,
308 void *session_private,
309 const struct rte_crypto_sym_xform *xform);
311 /** Configure internals PMD type specific data */
312 typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);
314 /** Per PMD type operation and data */
/* Per-PMD-type dispatch table: ops, capabilities and sizing that
 * ipsec_mb_create() uses to instantiate a device of a given
 * ipsec_mb_pmd_types. One entry per PMD type in ipsec_mb_pmds[].
 * NOTE(review): the enqueue-burst member (original ~line 317a) may exist
 * upstream next to dequeue_burst, and the closing "};" (original ~line
 * 328) is missing from this listing — confirm against the original file.
 */
315 struct ipsec_mb_internals {
316 uint8_t is_configured;
317 dequeue_pkt_burst_t dequeue_burst;
318 ipsec_mb_dev_configure_t dev_config;
319 ipsec_mb_queue_pair_configure_t queue_pair_configure;
320 ipsec_mb_session_configure_t session_configure;
321 const struct rte_cryptodev_capabilities *caps;
322 struct rte_cryptodev_ops *ops;
323 struct rte_security_ops *security_ops;
324 uint64_t feature_flags;
325 uint32_t session_priv_size;
326 uint32_t qp_priv_size;
327 uint32_t internals_priv_size;
330 /** Global PMD type specific data */
331 extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
334 ipsec_mb_config(struct rte_cryptodev *dev,
335 struct rte_cryptodev_config *config);
338 ipsec_mb_start(struct rte_cryptodev *dev);
341 ipsec_mb_stop(struct rte_cryptodev *dev);
344 ipsec_mb_close(struct rte_cryptodev *dev);
347 ipsec_mb_stats_get(struct rte_cryptodev *dev,
348 struct rte_cryptodev_stats *stats);
351 ipsec_mb_stats_reset(struct rte_cryptodev *dev);
354 ipsec_mb_info_get(struct rte_cryptodev *dev,
355 struct rte_cryptodev_info *dev_info);
358 ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);
361 ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);
364 ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
365 const struct rte_cryptodev_qp_conf *qp_conf,
368 /** Returns the size of the aesni multi-buffer session structure */
370 ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);
372 /** Configure an aesni multi-buffer session from a crypto xform chain */
373 int ipsec_mb_sym_session_configure(
374 struct rte_cryptodev *dev,
375 struct rte_crypto_sym_xform *xform,
376 struct rte_cryptodev_sym_session *sess,
377 struct rte_mempool *mempool);
379 /** Clear the memory of session so it does not leave key material behind */
381 ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
382 struct rte_cryptodev_sym_session *sess);
384 /** Get session from op. If sessionless create a session */
384 /** Get session from op. If sessionless create a session */
/* Resolves the PMD-private session data for a crypto op. WITH_SESSION ops
 * look it up from the attached session by driver id; SESSIONLESS ops pull
 * a session (and, when present, private data) from the qp mempools and
 * configure it from the op's inline xform, putting both objects back on
 * failure. On any failure sess stays NULL and the op status is set to
 * INVALID_SESSION.
 * NOTE(review): this listing drops the function braces, the
 * "switch (sess_type) {" header (original ~line 396), the declaration of
 * "_sess" (original ~line 392), the per-case "break;"s, the mempool-get
 * failure "return NULL;" lines (original 405, 410, 417-418), and the
 * final "return sess;" — restore from upstream; the visible lines show
 * intent only.
 */
385 static __rte_always_inline void *
386 ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
389 uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
390 struct rte_crypto_sym_op *sym_op = op->sym;
391 uint8_t sess_type = op->sess_type;
393 void *_sess_private_data = NULL;
394 struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];
397 case RTE_CRYPTO_OP_WITH_SESSION:
398 if (likely(sym_op->session != NULL))
399 sess = get_sym_session_private_data(sym_op->session,
402 case RTE_CRYPTO_OP_SESSIONLESS:
404 rte_mempool_get(qp->sess_mp, (void **)&_sess))
407 if (!qp->sess_mp_priv ||
408 rte_mempool_get(qp->sess_mp_priv,
409 (void **)&_sess_private_data))
412 sess = _sess_private_data;
413 if (unlikely(pmd_data->session_configure(qp->mb_mgr,
414 sess, sym_op->xform) != 0)) {
/* Roll back both mempool objects if session setup fails. */
415 rte_mempool_put(qp->sess_mp, _sess);
416 rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
420 sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
421 set_sym_session_private_data(sym_op->session, driver_id,
425 IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
428 if (unlikely(sess == NULL))
429 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
434 #endif /* _IPSEC_MB_PRIVATE_H_ */