+unsigned int cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused);
+
+void cnxk_ae_session_clear(struct rte_cryptodev *dev,
+			   struct rte_cryptodev_asym_session *sess);
+int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
+			struct rte_crypto_asym_xform *xform,
+			struct rte_cryptodev_asym_session *sess,
+			struct rte_mempool *pool);
+void cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp);
+
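+/*
+ * Fetch the event crypto adapter metadata attached to a crypto op. For ops
+ * carrying a session, the metadata is read from the session user data; for
+ * sessionless ops it is expected at private_data_offset within the op.
+ * Returns NULL when neither source is available.
+ */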
+static inline union rte_event_crypto_metadata *
+cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
+{
+	union rte_event_crypto_metadata *ec_mdata;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		ec_mdata = rte_cryptodev_sym_session_get_user_data(
+			op->sym->session);
+	else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+		 op->private_data_offset)
+		ec_mdata = (union rte_event_crypto_metadata
+			    *)((uint8_t *)op + op->private_data_offset);
+	else
+		return NULL;
+
+	return ec_mdata;
+}
+
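+/*
+ * Pending queue helpers. The pending queue is treated as a ring whose size
+ * (nb_desc) is assumed to be a power of two, so indices wrap by masking with
+ * mask = nb_desc - 1.
+ */
+
+/* Move the index forward by one slot, wrapping within the ring. */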
+static __rte_always_inline void
+pending_queue_advance(uint64_t *index, const uint64_t mask)
+{
+	*index = (*index + 1) & mask;
+}
+
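+/* Move the index back by nb_entry slots, wrapping within the ring. */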
+static __rte_always_inline void
+pending_queue_retreat(uint64_t *index, const uint64_t mask, uint64_t nb_entry)
+{
+	*index = (*index - nb_entry) & mask;
+}
+
+static __rte_always_inline uint64_t
+pending_queue_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
+{
+	/*
+	 * Mask is nb_desc - 1. Add nb_desc (mask + 1) to head before
+	 * subtracting tail, so that the difference stays non-negative when
+	 * tail > head (i.e. after head has wrapped around), then mask the
+	 * result back into the ring range.
+	 */
+	return ((head + mask + 1) - tail) & mask;
+}
+
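+/*
+ * Number of free pending queue slots. This is mask (nb_desc - 1) minus the
+ * inflight count, so at most nb_desc - 1 entries are ever reported free.
+ */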
+static __rte_always_inline uint64_t
+pending_queue_free_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
+{
+	/* mask is nb_desc - 1 */
+	return mask - pending_queue_infl_cnt(head, tail, mask);
+}
+