1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 Intel Corporation
5 #ifndef _IPSEC_MB_PRIVATE_H_
6 #define _IPSEC_MB_PRIVATE_H_
8 #include <intel-ipsec-mb.h>
9 #include <cryptodev_pmd.h>
10 #include <rte_bus_vdev.h>
12 #if defined(RTE_LIB_SECURITY)
13 #define IPSEC_MB_DOCSIS_SEC_ENABLED 1
14 #include <rte_security.h>
15 #include <rte_security_driver.h>
18 /* Maximum length for digest */
19 #define DIGEST_LENGTH_MAX 64
21 /* Maximum length for memzone name */
22 #define IPSEC_MB_MAX_MZ_NAME 32
/* SIMD instruction-set mode used when initializing the IMB manager.
 * NOTE(review): the remaining enumerators and the closing brace are
 * not visible in this chunk — body appears truncated.
 */
24 enum ipsec_mb_vector_mode {
25 IPSEC_MB_NOT_SUPPORTED = 0,
/* Global vector mode shared by all ipsec-mb PMDs (defined in a .c file). */
32 extern enum ipsec_mb_vector_mode vector_mode;
34 /** IMB_MGR instances, one per thread */
35 extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
37 #define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
38 /**< IPSEC Multi buffer aesni_mb PMD device name */
40 #define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
41 /**< IPSEC Multi buffer PMD aesni_gcm device name */
43 /** PMD LOGTYPE DRIVER, common to all PMDs */
44 extern int ipsec_mb_logtype_driver;
45 #define IPSEC_MB_LOG(level, fmt, ...) \
46 rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver, \
47 "%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
49 /** All supported device types */
/* Also used as the index into the global ipsec_mb_pmds[] table and to
 * select the driver id in ipsec_mb_get_driver_id().
 * NOTE(review): trailing enumerators (including the
 * IPSEC_MB_N_PMD_TYPES terminator referenced by ipsec_mb_pmds[]) and
 * the closing brace are not visible in this chunk.
 */
50 enum ipsec_mb_pmd_types {
51 IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
52 IPSEC_MB_PMD_TYPE_AESNI_GCM,
56 /** Crypto operations */
/* Result of classifying an xform chain; written to *mode by
 * ipsec_mb_parse_xform() below. Covers single cipher/auth operations,
 * both chaining orders, and AEAD in either direction.
 * NOTE(review): closing brace of the enum is not visible in this chunk.
 */
57 enum ipsec_mb_operation {
58 IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
59 IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
60 IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
61 IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
62 IPSEC_MB_OP_ENCRYPT_ONLY,
63 IPSEC_MB_OP_DECRYPT_ONLY,
64 IPSEC_MB_OP_HASH_GEN_ONLY,
65 IPSEC_MB_OP_HASH_VERIFY_ONLY,
66 IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
67 IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
/* Sentinel: xform chain not recognized / not supported. */
68 IPSEC_MB_OP_NOT_SUPPORTED
71 extern uint8_t pmd_driver_id_aesni_mb;
72 extern uint8_t pmd_driver_id_aesni_gcm;
74 /** Helper function. Gets driver ID based on PMD type */
/* Maps each ipsec_mb_pmd_types value to the cryptodev driver id
 * registered for that PMD (the pmd_driver_id_* externs above).
 * NOTE(review): the function braces, the switch statement and the
 * default case are not visible in this chunk — body appears truncated.
 */
75 static __rte_always_inline uint8_t
76 ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
79 case IPSEC_MB_PMD_TYPE_AESNI_MB:
80 return pmd_driver_id_aesni_mb;
81 case IPSEC_MB_PMD_TYPE_AESNI_GCM:
82 return pmd_driver_id_aesni_gcm;
89 /** Common private data structure for each PMD */
/* Stored as the cryptodev's private data; the flexible trailing
 * region priv[] holds PMD-type-specific state (its size is given by
 * internals_priv_size in struct ipsec_mb_internals).
 * NOTE(review): closing brace not visible in this chunk.
 */
90 struct ipsec_mb_dev_private {
91 enum ipsec_mb_pmd_types pmd_type;
/**< Type of this PMD instance (selects entry in ipsec_mb_pmds[]) */
93 uint32_t max_nb_queue_pairs;
94 /**< Max number of queue pairs supported by device */
95 __extension__ uint8_t priv[0];
/**< Zero-length array: PMD-specific private data follows in memory */
98 /** IPSEC Multi buffer queue pair common queue pair data for all PMDs */
/* NOTE(review): the struct declaration line ("struct ipsec_mb_qp {"),
 * some members (e.g. the qp id, digest_idx, and the mb_mgr pointer
 * used by ipsec_mb_get_session_private() below) and the closing brace
 * are not visible in this chunk — presumably dropped by extraction.
 */
101 /**< Queue Pair Identifier */
102 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
103 struct rte_ring *ingress_queue;
104 /**< Ring for placing operations ready for processing */
105 struct rte_mempool *sess_mp;
106 /**< Session Mempool */
107 struct rte_mempool *sess_mp_priv;
108 /**< Session Private Data Mempool */
109 struct rte_cryptodev_stats stats;
110 /**< Queue pair statistics */
111 enum ipsec_mb_pmd_types pmd_type;
/**< Owning PMD type (used to index ipsec_mb_pmds[]) */
114 /**< Index of the next
115 * slot to be used in temp_digests,
116 * to store the digest for a given operation
119 /* Multi buffer manager */
120 const struct rte_memzone *mb_mgr_mz;
121 /* Shared memzone for storing mb_mgr */
122 __extension__ uint8_t additional_data[0];
123 /**< Storing PMD specific additional data */
/* Returns a pointer to the PMD-specific data stored after the common
 * queue pair structure (the additional_data[] flexible region).
 * NOTE(review): function braces not visible in this chunk.
 */
126 static __rte_always_inline void *
127 ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
129 return (void *)qp->additional_data;
132 /** Helper function. Allocates job manager */
/* Allocates an intel-ipsec-mb IMB_MGR and initializes it with the
 * best instruction set detected at runtime (init_mb_mgr_auto).
 * NOTE(review): the error-path return after the log message and the
 * final return/closing brace are not visible in this chunk.
 */
133 static __rte_always_inline IMB_MGR *
134 alloc_init_mb_mgr(void)
136 IMB_MGR *mb_mgr = alloc_mb_mgr(0);
138 if (unlikely(mb_mgr == NULL)) {
139 IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data\n");
143 init_mb_mgr_auto(mb_mgr, NULL);
148 /** Helper function. Gets per thread job manager */
/* Lazily allocates the per-lcore IMB_MGR on first use (see the
 * RTE_DEFINE_PER_LCORE(mb_mgr) declaration above) and returns it.
 * May return NULL if allocation failed.
 * NOTE(review): function braces not visible in this chunk.
 */
149 static __rte_always_inline IMB_MGR *
150 get_per_thread_mb_mgr(void)
152 if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
153 RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();
155 return RTE_PER_LCORE(mb_mgr);
158 /** Helper function. Gets mode and chained xforms from the xform */
/* Classifies a crypto xform chain into an ipsec_mb_operation and
 * returns the individual cipher/auth/aead xforms through the output
 * parameters. Defaults *mode to IPSEC_MB_OP_NOT_SUPPORTED and refines
 * it per chain shape: CIPHER alone or CIPHER->AUTH, AUTH alone or
 * AUTH->CIPHER, or a single AEAD xform.
 * NOTE(review): many lines of this function (return statements, else
 * branches, braces) are not visible in this chunk — the logic below is
 * only a partial view.
 */
159 static __rte_always_inline int
160 ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
161 enum ipsec_mb_operation *mode,
162 const struct rte_crypto_sym_xform **auth_xform,
163 const struct rte_crypto_sym_xform **cipher_xform,
164 const struct rte_crypto_sym_xform **aead_xform)
165 const struct rte_crypto_sym_xform *next = xform->next;
/* Default until a supported chain shape is recognized. */
169 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
/* --- Chain starts with a CIPHER xform --- */
173 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
175 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
176 *mode = IPSEC_MB_OP_ENCRYPT_ONLY;
177 *cipher_xform = xform;
181 *mode = IPSEC_MB_OP_DECRYPT_ONLY;
182 *cipher_xform = xform;
/* CIPHER must only be chained with AUTH. */
187 if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
188 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
/* Encrypt-then-generate: direction of both halves must agree. */
192 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
193 if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
194 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
198 *mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
199 *cipher_xform = xform;
200 *auth_xform = xform->next;
203 if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
204 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
208 *mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
209 *cipher_xform = xform;
210 *auth_xform = xform->next;
/* --- Chain starts with an AUTH xform --- */
214 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
216 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
217 *mode = IPSEC_MB_OP_HASH_GEN_ONLY;
219 *cipher_xform = NULL;
222 *mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
224 *cipher_xform = NULL;
/* AUTH must only be chained with CIPHER. */
228 if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
229 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
233 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
234 if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
235 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
239 *mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
241 *cipher_xform = xform->next;
244 if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
245 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
249 *mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
251 *cipher_xform = xform->next;
/* --- Single AEAD xform: CCM swaps the encrypt/decrypt mode because
 * it hashes first and ciphers later (see comment fragment below). --- */
255 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
256 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
258 * CCM requires to hash first and cipher later
261 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
262 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
267 IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
272 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
273 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
277 *mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
/* Fall-through: unrecognized chain. */
283 *mode = IPSEC_MB_OP_NOT_SUPPORTED;
287 /** Device creation function */
289 ipsec_mb_create(struct rte_vdev_device *vdev,
290 enum ipsec_mb_pmd_types pmd_type);
292 /** Device remove function */
294 ipsec_mb_remove(struct rte_vdev_device *vdev);
296 /** Configure queue pair PMD type specific data */
297 typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);
299 /** Configure session PMD type specific data */
300 typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mbr_mgr,
301 void *session_private,
302 const struct rte_crypto_sym_xform *xform);
304 /** Configure internals PMD type specific data */
305 typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);
307 /** Per PMD type operation and data */
/* One entry per PMD type in the global ipsec_mb_pmds[] table below:
 * function pointers and sizing info that specialize the shared
 * ipsec-mb framework for a concrete PMD.
 * NOTE(review): closing brace not visible in this chunk.
 */
308 struct ipsec_mb_internals {
/**< Non-zero once this PMD type's entry has been filled in */
309 uint8_t is_configured;
/**< Burst dequeue function for this PMD type */
310 dequeue_pkt_burst_t dequeue_burst;
/**< PMD-specific device configure hook */
311 ipsec_mb_dev_configure_t dev_config;
/**< PMD-specific queue pair configure hook */
312 ipsec_mb_queue_pair_configure_t queue_pair_configure;
/**< PMD-specific session configure hook (also used for the
 * sessionless path in ipsec_mb_get_session_private()) */
313 ipsec_mb_session_configure_t session_configure;
/**< Supported capabilities table */
314 const struct rte_cryptodev_capabilities *caps;
/**< Cryptodev ops for this PMD type */
315 struct rte_cryptodev_ops *ops;
/**< Security ops (only relevant when RTE_LIB_SECURITY is enabled) */
316 struct rte_security_ops *security_ops;
/**< RTE_CRYPTODEV_FF_* feature flags advertised by the device */
317 uint64_t feature_flags;
/**< Size of the PMD's per-session private data */
318 uint32_t session_priv_size;
/**< Size of the additional_data[] region of struct ipsec_mb_qp */
319 uint32_t qp_priv_size;
/**< Size of the priv[] region of struct ipsec_mb_dev_private */
320 uint32_t internals_priv_size;
323 /** Global PMD type specific data */
324 extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
327 ipsec_mb_config(struct rte_cryptodev *dev,
328 struct rte_cryptodev_config *config);
331 ipsec_mb_start(struct rte_cryptodev *dev);
334 ipsec_mb_stop(struct rte_cryptodev *dev);
337 ipsec_mb_close(struct rte_cryptodev *dev);
340 ipsec_mb_stats_get(struct rte_cryptodev *dev,
341 struct rte_cryptodev_stats *stats);
344 ipsec_mb_stats_reset(struct rte_cryptodev *dev);
347 ipsec_mb_info_get(struct rte_cryptodev *dev,
348 struct rte_cryptodev_info *dev_info);
351 ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);
354 ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);
357 ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
358 const struct rte_cryptodev_qp_conf *qp_conf,
361 /** Returns the size of the aesni multi-buffer session structure */
363 ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);
365 /** Configure an aesni multi-buffer session from a crypto xform chain */
366 int ipsec_mb_sym_session_configure(
367 struct rte_cryptodev *dev,
368 struct rte_crypto_sym_xform *xform,
369 struct rte_cryptodev_sym_session *sess,
370 struct rte_mempool *mempool);
372 /** Clear the memory of session so it does not leave key material behind */
374 ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
375 struct rte_cryptodev_sym_session *sess);
377 /** Get session from op. If sessionless create a session */
/* Resolves the session private data for a crypto op on this queue
 * pair. WITH_SESSION ops look up the driver-private data attached to
 * the op's session; SESSIONLESS ops allocate a session object and its
 * private data from the qp mempools, configure it via the PMD's
 * session_configure hook, and attach it to the op. On any failure the
 * op status is set to INVALID_SESSION.
 * NOTE(review): the switch statement, several returns/breaks, the
 * declaration of _sess, and the closing brace are not visible in this
 * chunk — the body is a partial view.
 */
378 static __rte_always_inline void *
379 ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
380 uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
383 struct rte_crypto_sym_op *sym_op = op->sym;
384 uint8_t sess_type = op->sess_type;
386 void *_sess_private_data = NULL;
387 struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];
/* Session was created up-front: fetch this driver's private data. */
390 case RTE_CRYPTO_OP_WITH_SESSION:
391 if (likely(sym_op->session != NULL))
392 sess = get_sym_session_private_data(sym_op->session,
/* Sessionless: build a temporary session from the qp mempools. */
395 case RTE_CRYPTO_OP_SESSIONLESS:
397 rte_mempool_get(qp->sess_mp, (void **)&_sess))
400 if (!qp->sess_mp_priv ||
401 rte_mempool_get(qp->sess_mp_priv,
402 (void **)&_sess_private_data))
405 sess = _sess_private_data;
/* Configure via the PMD hook; on failure release both objects. */
406 if (unlikely(pmd_data->session_configure(qp->mb_mgr,
407 sess, sym_op->xform) != 0)) {
408 rte_mempool_put(qp->sess_mp, _sess);
409 rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
/* Attach the newly built session to the op for later teardown. */
413 sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
414 set_sym_session_private_data(sym_op->session, driver_id,
418 IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
421 if (unlikely(sess == NULL))
422 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
427 #endif /* _IPSEC_MB_PRIVATE_H_ */