void *mdata;
uint8_t op_flags;
void *qp;
-} __rte_aligned(16);
+} __rte_aligned(ROC_ALIGN);
+
+PLT_STATIC_ASSERT(sizeof(struct cpt_inflight_req) == ROC_CACHE_LINE_SZ);
struct pending_queue {
/** Array of pending requests */
struct rte_cryptodev_asym_session *sess);
/* Configure an asymmetric crypto session.
 * Note: the mempool parameter was dropped; session memory now comes from
 * the cryptodev asym session mempool supplied at session-pool creation.
 */
int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
			struct rte_crypto_asym_xform *xform,
			struct rte_cryptodev_asym_session *sess);
/* Dump CPT queue-pair state (LF registers, instruction queue) on error. */
void cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp);
static inline union rte_event_crypto_metadata *
cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
/* Return the number of requests currently in flight on the pending queue.
 *
 * @param head  producer index (monotonically advanced on enqueue)
 * @param tail  consumer index (monotonically advanced on dequeue)
 * @param mask  ring-size mask; always nb_desc - 1 (nb_desc is a power of 2)
 * @return      in-flight request count in [0, nb_desc - 1]
 */
static __rte_always_inline uint64_t
pending_queue_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
{
	/*
	 * Mask is nb_desc - 1. Add nb_desc to head and mask to account for
	 * cases when tail > head, which happens during wrap around.
	 */
	return ((head + mask + 1) - tail) & mask;
}
static __rte_always_inline uint64_t