int mlen;
};
-enum sym_xform_type {
- CNXK_CPT_CIPHER = 1,
- CNXK_CPT_AUTH,
- CNXK_CPT_AEAD,
- CNXK_CPT_CIPHER_ENC_AUTH_GEN,
- CNXK_CPT_AUTH_VRFY_CIPHER_DEC,
- CNXK_CPT_AUTH_GEN_CIPHER_ENC,
- CNXK_CPT_CIPHER_DEC_AUTH_VRFY
-};
-
#define CPT_OP_FLAGS_METABUF (1 << 1)
#define CPT_OP_FLAGS_AUTH_VERIFY (1 << 0)
#define CPT_OP_FLAGS_IPSEC_DIR_INBOUND (1 << 2)
void *mdata;
uint8_t op_flags;
void *qp;
-} __rte_aligned(16);
+} __rte_aligned(ROC_ALIGN);
+
+PLT_STATIC_ASSERT(sizeof(struct cpt_inflight_req) == ROC_CACHE_LINE_SZ);
struct pending_queue {
- /** Pending requests count */
- uint64_t pending_count;
/** Array of pending requests */
struct cpt_inflight_req *req_queue;
- /** Tail of queue to be used for enqueue */
- uint16_t enq_tail;
- /** Head of queue to be used for dequeue */
- uint16_t deq_head;
+ /** Head of the queue to be used for enqueue */
+ uint64_t head;
+ /** Tail of the queue to be used for dequeue */
+ uint64_t tail;
+ /** Pending queue mask */
+ uint64_t pq_mask;
/** Timeout to track h/w being unresponsive */
uint64_t time_out;
};
struct rte_cryptodev_asym_session *sess);
int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
struct rte_crypto_asym_xform *xform,
- struct rte_cryptodev_asym_session *sess,
- struct rte_mempool *pool);
+ struct rte_cryptodev_asym_session *sess);
+void cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp);
static inline union rte_event_crypto_metadata *
cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
return ec_mdata;
}
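+/* Advance a pending queue index by one slot, wrapping on the queue mask. */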
+static __rte_always_inline void
+pending_queue_advance(uint64_t *index, const uint64_t mask)
+{
+ *index = (*index + 1) & mask;
+}
+
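+/* Move a pending queue index back by nb_entry slots, wrapping on the mask. */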
+static __rte_always_inline void
+pending_queue_retreat(uint64_t *index, const uint64_t mask, uint64_t nb_entry)
+{
+ *index = (*index - nb_entry) & mask;
+}
+
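+/* Number of requests currently in flight, i.e. between tail and head. */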
+static __rte_always_inline uint64_t
+pending_queue_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
+{
+ /*
+	 * Mask is nb_desc - 1. Add nb_desc (mask + 1) to head before
+	 * subtracting tail and masking, so the count stays correct when
+	 * tail > head, which happens after the head index wraps around.
+ */
+ return ((head + mask + 1) - tail) & mask;
+}
+
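+/* Number of free pending queue slots available for enqueue. */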
+static __rte_always_inline uint64_t
+pending_queue_free_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
+{
+ /* mask is nb_desc - 1 */
+ return mask - pending_queue_infl_cnt(head, tail, mask);
+}
+
#endif /* _CNXK_CRYPTODEV_OPS_H_ */