/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
5 #ifndef _CNXK_CRYPTODEV_OPS_H_
6 #define _CNXK_CRYPTODEV_OPS_H_
8 #include <rte_cryptodev.h>
9 #include <rte_event_crypto_adapter.h>
/* Minimum mbuf head/tail room the PMD needs for CPT metadata */
#define CNXK_CPT_MIN_HEADROOM_REQ 24
#define CNXK_CPT_MIN_TAILROOM_REQ 102

/* Default command timeout in seconds */
#define DEFAULT_COMMAND_TIMEOUT 4

/* Increment 'i' modulo limit 'l': wraps to 0 when i reaches l - 1.
 * Both arguments are fully parenthesized; 'i' is evaluated (and assigned)
 * more than once, so pass only a side-effect-free lvalue.
 */
#define MOD_INC(i, l) ((i) == ((l) - 1) ? (i) = 0 : (i)++)
/* Macros to form words in CPT instruction */

/* Word 2 layout (from the shifts below): tag at bits [31:0], tt starting
 * at bit 32, grp starting at bit 34, rvu_pf_func starting at bit 48.
 */
#define CNXK_CPT_INST_W2(tag, tt, grp, rvu_pf_func)                            \
	(((uint64_t)(rvu_pf_func) << 48) | ((uint64_t)(grp) << 34) |           \
	 ((uint64_t)(tt) << 32) | (tag))
/* Word 3: queue ordering bit(s) in the low bits, OR'd with wqe_ptr aligned
 * down to 8 bytes (>> 3 << 3 clears bits [2:0]). 'qord' is parenthesized so
 * an expression argument cannot mis-parse against the OR.
 */
#define CNXK_CPT_INST_W3(qord, wqe_ptr)                                        \
	((qord) | ((uintptr_t)(wqe_ptr) >> 3) << 3)
28 struct cpt_qp_meta_info {
29 struct rte_mempool *pool;
37 CNXK_CPT_CIPHER_ENC_AUTH_GEN,
38 CNXK_CPT_AUTH_VRFY_CIPHER_DEC,
39 CNXK_CPT_AUTH_GEN_CIPHER_ENC,
40 CNXK_CPT_CIPHER_DEC_AUTH_VRFY
/* Per-operation flag bits, listed in bit order.
 * NOTE(review): presumably stored in cpt_inflight_req's op_flags field —
 * that field is not visible in this chunk; confirm against the full file.
 */
#define CPT_OP_FLAGS_AUTH_VERIFY       (1 << 0)
#define CPT_OP_FLAGS_METABUF	       (1 << 1)
#define CPT_OP_FLAGS_IPSEC_DIR_INBOUND (1 << 2)
47 struct cpt_inflight_req {
49 struct rte_crypto_op *cop;
55 struct pending_queue {
56 /** Array of pending requests */
57 struct cpt_inflight_req *req_queue;
58 /** Head of the queue to be used for enqueue */
60 /** Tail of the queue to be used for dequeue */
62 /** Pending queue mask */
64 /** Timeout to track h/w being unresponsive */
68 struct crypto_adpter_info {
70 /**< Set if queue pair is added to crypto adapter */
71 struct rte_mempool *req_mp;
72 /**< CPT inflight request mempool */
78 struct pending_queue pend_q;
80 struct rte_mempool *sess_mp;
81 /**< Session mempool */
82 struct rte_mempool *sess_mp_priv;
83 /**< Session private data mempool */
84 struct cpt_qp_meta_info meta_info;
85 /**< Metabuf info required to support operations on the queue pair */
86 struct roc_cpt_lmtline lmtline;
87 /**< Lmtline information */
88 struct crypto_adpter_info ca;
89 /**< Crypto adapter related info */
92 int cnxk_cpt_dev_config(struct rte_cryptodev *dev,
93 struct rte_cryptodev_config *conf);
95 int cnxk_cpt_dev_start(struct rte_cryptodev *dev);
97 void cnxk_cpt_dev_stop(struct rte_cryptodev *dev);
99 int cnxk_cpt_dev_close(struct rte_cryptodev *dev);
101 void cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
102 struct rte_cryptodev_info *info);
104 int cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
105 const struct rte_cryptodev_qp_conf *conf,
106 int socket_id __rte_unused);
108 int cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);
110 unsigned int cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev);
112 int cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
113 struct rte_crypto_sym_xform *xform,
114 struct rte_cryptodev_sym_session *sess,
115 struct rte_mempool *pool);
117 int sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
118 struct rte_crypto_sym_xform *xform,
119 struct rte_cryptodev_sym_session *sess,
120 struct rte_mempool *pool);
122 void cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
123 struct rte_cryptodev_sym_session *sess);
125 void sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess);
127 unsigned int cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused);
129 void cnxk_ae_session_clear(struct rte_cryptodev *dev,
130 struct rte_cryptodev_asym_session *sess);
131 int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
132 struct rte_crypto_asym_xform *xform,
133 struct rte_cryptodev_asym_session *sess,
134 struct rte_mempool *pool);
136 static inline union rte_event_crypto_metadata *
137 cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
139 union rte_event_crypto_metadata *ec_mdata;
141 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
142 ec_mdata = rte_cryptodev_sym_session_get_user_data(
144 else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
145 op->private_data_offset)
146 ec_mdata = (union rte_event_crypto_metadata
147 *)((uint8_t *)op + op->private_data_offset);
154 static __rte_always_inline void
155 pending_queue_advance(uint64_t *index, const uint64_t mask)
157 *index = (*index + 1) & mask;
160 static __rte_always_inline void
161 pending_queue_retreat(uint64_t *index, const uint64_t mask, uint64_t nb_entry)
163 *index = (*index - nb_entry) & mask;
166 static __rte_always_inline uint64_t
167 pending_queue_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
169 return (head - tail) & mask;
172 static __rte_always_inline uint64_t
173 pending_queue_free_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
175 /* mask is nb_desc - 1 */
176 return mask - pending_queue_infl_cnt(head, tail, mask);
179 #endif /* _CNXK_CRYPTODEV_OPS_H_ */