1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 Intel Corporation
5 #ifndef _IPSEC_MB_PRIVATE_H_
6 #define _IPSEC_MB_PRIVATE_H_
8 #include <intel-ipsec-mb.h>
9 #include <cryptodev_pmd.h>
10 #include <rte_bus_vdev.h>
12 #if defined(RTE_LIB_SECURITY)
13 #define IPSEC_MB_DOCSIS_SEC_ENABLED 1
14 #include <rte_security.h>
15 #include <rte_security_driver.h>
/* Maximum length for digest (in bytes) held in per-op temp storage */
#define DIGEST_LENGTH_MAX 64

/* Maximum length for memzone name (used for the shared IMB_MGR memzone) */
#define IPSEC_MB_MAX_MZ_NAME 32
/** CPU vector instruction set the multi-buffer library runs with.
 * NOTE(review): only the first enumerator is visible here — the remaining
 * vector modes (presumably SSE/AVX variants) are declared elsewhere; confirm
 * against the full header.
 */
enum ipsec_mb_vector_mode {
	IPSEC_MB_NOT_SUPPORTED = 0,

/** Vector mode detected at probe time, shared by all ipsec_mb PMDs */
extern enum ipsec_mb_vector_mode vector_mode;
/** IMB_MGR instances, one per thread (lazily allocated, see
 * get_per_thread_mb_mgr())
 */
extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

/** PMD LOGTYPE DRIVER, common to all PMDs */
extern int ipsec_mb_logtype_driver;

/** Driver log macro: prefixes every message with the calling function name
 * and line number, and appends a trailing newline.
 */
#define IPSEC_MB_LOG(level, fmt, ...) \
	rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver, \
		"%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
/** All supported device types.
 * NOTE(review): the enumerator list is not visible in this view; it is
 * indexed by ipsec_mb_pmds[] and bounded by IPSEC_MB_N_PMD_TYPES — confirm
 * against the full header.
 */
enum ipsec_mb_pmd_types {
/** Crypto operations — the order in which cipher and auth are applied,
 * derived from the session's xform chain.
 */
enum ipsec_mb_operation {
	/* Chained: cipher encrypt first, then generate digest */
	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
	/* Chained: verify digest first, then cipher decrypt */
	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
	/* Chained: generate digest first, then cipher encrypt */
	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
	/* Chained: cipher decrypt first, then verify digest */
	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
	/* Cipher-only operations */
	IPSEC_MB_OP_ENCRYPT_ONLY,
	IPSEC_MB_OP_DECRYPT_ONLY,
	/* Auth-only operations */
	IPSEC_MB_OP_HASH_GEN_ONLY,
	IPSEC_MB_OP_HASH_VERIFY_ONLY,
	/* AEAD (combined cipher+auth) operations */
	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
	/* Sentinel: requested xform combination is not supported */
	IPSEC_MB_OP_NOT_SUPPORTED
/** Helper function. Gets driver ID based on PMD type.
 * NOTE(review): body not visible in this view — presumably maps each
 * ipsec_mb_pmd_types value to its registered cryptodev driver id; confirm
 * against the full header.
 */
static __rte_always_inline uint8_t
ipsec_mb_get_driver_id(__rte_unused enum ipsec_mb_pmd_types pmd_type)
/** Common private data structure for each PMD.
 * PMD-specific state is appended in the trailing flexible member.
 */
struct ipsec_mb_dev_private {
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD  type */
	uint32_t max_nb_queue_pairs;
	/**< Max number of queue pairs supported by device */
	/* Zero-length trailing member: per-PMD private data follows the
	 * struct in the same allocation.
	 */
	__extension__ uint8_t priv[0];
/** IPSEC Multi buffer queue pair common queue pair data for all PMDs.
 * NOTE(review): the struct opening line and some fields (e.g. the qp id and
 * digest index) are not visible in this view — confirm against the full
 * header.
 */
	/**< Queue Pair Identifier */
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	/**< Unique Queue Pair Name */
	struct rte_ring *ingress_queue;
	/**< Ring for placing operations ready for processing */
	struct rte_mempool *sess_mp;
	/**< Session Mempool */
	struct rte_mempool *sess_mp_priv;
	/**< Session Private Data Mempool */
	struct rte_cryptodev_stats stats;
	/**< Queue pair statistics */
	enum ipsec_mb_pmd_types pmd_type;
	/**< PMD type of this queue pair */
	/**< Index of the next
	 * slot to be used in temp_digests,
	 * to store the digest for a given operation
	 */
	/* Multi buffer manager */
	const struct rte_memzone *mb_mgr_mz;
	/* Shared memzone for storing mb_mgr */
	/* Zero-length trailing member: per-PMD qp data follows the struct */
	__extension__ uint8_t additional_data[0];
	/**< Storing PMD specific additional data */
/** Helper function. Returns a pointer to the PMD-specific data appended
 * after the common queue pair structure (the additional_data flexible
 * member).
 */
static __rte_always_inline void *
ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
	return (void *)qp->additional_data;
/** Helper function. Allocates a job manager and auto-detects the best
 * architecture-specific implementation for it.
 * NOTE(review): error-path return and closing braces are not visible in
 * this view — presumably returns NULL on allocation failure; confirm
 * against the full header.
 */
static __rte_always_inline IMB_MGR *
alloc_init_mb_mgr(void)
	IMB_MGR *mb_mgr = alloc_mb_mgr(0);

	if (unlikely(mb_mgr == NULL)) {
		/* NOTE(review): IPSEC_MB_LOG already appends "\n"; the
		 * trailing "\n" here produces a duplicate newline.
		 */
		IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data\n");
	/* Pick the best SIMD implementation for the running CPU */
	init_mb_mgr_auto(mb_mgr, NULL);
/** Helper function. Gets the per-lcore job manager, lazily allocating and
 * initializing it on first use by this thread.
 */
static __rte_always_inline IMB_MGR *
get_per_thread_mb_mgr(void)
	if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
		RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();

	/* May still be NULL if allocation failed; callers must check */
	return RTE_PER_LCORE(mb_mgr);
/** Device creation function.
 * NOTE(review): return-type lines of these prototypes are not visible in
 * this view (presumably int); confirm against the full header.
 */
ipsec_mb_create(struct rte_vdev_device *vdev,
	enum ipsec_mb_pmd_types pmd_type);

/** Device remove function */
ipsec_mb_remove(struct rte_vdev_device *vdev);

/** Configure queue pair PMD type specific data */
typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);

/** Configure session PMD type specific data.
 * Fills session_private from the xform chain using the given job manager.
 */
typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mbr_mgr,
		void *session_private,
		const struct rte_crypto_sym_xform *xform);

/** Configure internals PMD type specific data */
typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);
/** Per PMD type operation and data — one entry per device type, collected
 * in the global ipsec_mb_pmds[] table.
 */
struct ipsec_mb_internals {
	uint8_t is_configured;
	/**< Set once the PMD type entry has been initialized */
	dequeue_pkt_burst_t dequeue_burst;
	/**< Burst dequeue function for this PMD type */
	ipsec_mb_dev_configure_t dev_config;
	/**< Optional PMD-specific device configure hook */
	ipsec_mb_queue_pair_configure_t queue_pair_configure;
	/**< Optional PMD-specific queue pair configure hook */
	ipsec_mb_session_configure_t session_configure;
	/**< PMD-specific session configure hook */
	const struct rte_cryptodev_capabilities *caps;
	/**< Capability table advertised by this PMD type */
	struct rte_cryptodev_ops *ops;
	/**< Cryptodev ops table */
	struct rte_security_ops *security_ops;
	/**< Security ops table (when RTE_LIB_SECURITY is enabled) */
	uint64_t feature_flags;
	/**< Cryptodev feature flags */
	uint32_t session_priv_size;
	/**< Size of the PMD-specific session private data */
	uint32_t qp_priv_size;
	/**< Size of the PMD-specific queue pair private data */
	uint32_t internals_priv_size;
	/**< Size of the PMD-specific device private data */
/** Global PMD type specific data, indexed by enum ipsec_mb_pmd_types */
extern struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];

/** Common cryptodev ops, shared by all ipsec_mb PMD types.
 * NOTE(review): return-type lines of these prototypes are not visible in
 * this view (presumably int / void / unsigned as appropriate); confirm
 * against the full header.
 */
ipsec_mb_config(struct rte_cryptodev *dev,
	struct rte_cryptodev_config *config);

ipsec_mb_start(struct rte_cryptodev *dev);

ipsec_mb_stop(struct rte_cryptodev *dev);

ipsec_mb_close(struct rte_cryptodev *dev);

ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats);

ipsec_mb_stats_reset(struct rte_cryptodev *dev);

ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);

ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);

ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev, struct ipsec_mb_qp *qp);

ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf,

/** Returns the size of the aesni multi-buffer session structure */
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev);

/** Configure an aesni multi-buffer session from a crypto xform chain */
int ipsec_mb_sym_session_configure(
	struct rte_cryptodev *dev,
	struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess,
	struct rte_mempool *mempool);

/** Clear the memory of session so it does not leave key material behind */
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess);
/** Get session from op. If sessionless create a session.
 * On any failure the caller sees sess == NULL and op->status is set to
 * RTE_CRYPTO_OP_STATUS_INVALID_SESSION.
 * NOTE(review): the switch statement line, break/return statements and
 * some braces are not visible in this view — confirm control flow against
 * the full header.
 */
static __rte_always_inline void *
ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
	uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t sess_type = op->sess_type;
	void *_sess_private_data = NULL;
	struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[qp->pmd_type];

	case RTE_CRYPTO_OP_WITH_SESSION:
		/* Session already exists: fetch this driver's private data */
		if (likely(sym_op->session != NULL))
			sess = get_sym_session_private_data(sym_op->session,
	case RTE_CRYPTO_OP_SESSIONLESS:
		/* Sessionless: build a temporary session from the qp mempools */
		rte_mempool_get(qp->sess_mp, (void **)&_sess))

		if (!qp->sess_mp_priv ||
			rte_mempool_get(qp->sess_mp_priv,
				(void **)&_sess_private_data))

		sess = _sess_private_data;
		/* Initialize the private data from the op's inline xform;
		 * on failure return both objects to their mempools.
		 */
		if (unlikely(pmd_data->session_configure(qp->mb_mgr,
				sess, sym_op->xform) != 0)) {
			rte_mempool_put(qp->sess_mp, _sess);
			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);

		/* Attach the temporary session to the op so the dequeue path
		 * can recycle it when the op completes.
		 */
		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
		set_sym_session_private_data(sym_op->session, driver_id,
		IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);

	if (unlikely(sess == NULL))
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
279 #endif /* _IPSEC_MB_PRIVATE_H_ */