+/*
+ * Number of requests currently in flight on the queue pair, i.e. enqueued
+ * but not yet dequeued.  Read atomically so it is safe to call from a
+ * thread other than the one posting/completing requests.
+ */
+static inline uint16_t
+nitrox_qp_used_count(struct nitrox_qp *qp)
+{
+ return rte_atomic16_read(&qp->pending_count);
+}
+
+/*
+ * Return the software request at the consumer (tail) position of the ring
+ * without advancing the tail.  Caller is expected to have checked that the
+ * queue is non-empty (e.g. via nitrox_qp_used_count()) before calling.
+ *
+ * The read barrier orders this load of ridq[tail].sr against the earlier
+ * pending_count read — presumably pairing with the rte_smp_wmb() in
+ * nitrox_qp_enqueue() so the sr pointer is seen only after it was
+ * published.  NOTE(review): confirm the intended pairing with the enqueue
+ * side.
+ */
+static inline struct nitrox_softreq *
+nitrox_qp_get_softreq(struct nitrox_qp *qp)
+{
+ uint32_t tail = qp->tail % qp->count;
+
+ rte_smp_rmb();
+ return qp->ridq[tail].sr;
+}
+
+/*
+ * Ring the command-queue doorbell, telling the device that @cnt new
+ * instructions are available.  A count of zero is a no-op.
+ *
+ * rte_io_wmb() makes sure all prior stores into the command ring
+ * (the memcpy in nitrox_qp_enqueue()) are visible to the device before
+ * the doorbell CSR write is issued.
+ */
+static inline void
+nitrox_ring_dbell(struct nitrox_qp *qp, uint16_t cnt)
+{
+ struct command_queue *cmdq = &qp->cmdq;
+
+ if (!cnt)
+ return;
+
+ rte_io_wmb();
+ rte_write64(cnt, cmdq->dbell_csr_addr);
+}
+
+/*
+ * Post one instruction on the queue pair: copy @instr (cmdq.instr_size
+ * bytes) into the command ring at the producer (head) slot, remember the
+ * matching software request in ridq[], and advance the head.
+ *
+ * Publication order: the write barrier ensures the ring/ridq stores are
+ * globally visible before pending_count is incremented, so a consumer
+ * that observes the new count also sees a valid sr (pairs with the
+ * rte_smp_rmb() in nitrox_qp_get_softreq()).
+ *
+ * NOTE(review): no full-queue check here — caller is presumably
+ * responsible for ensuring there is a free slot before enqueuing.
+ * The doorbell is rung separately via nitrox_ring_dbell().
+ */
+static inline void
+nitrox_qp_enqueue(struct nitrox_qp *qp, void *instr, struct nitrox_softreq *sr)
+{
+ uint32_t head = qp->head % qp->count;
+
+ qp->head++;
+ memcpy(&qp->cmdq.ring[head * qp->cmdq.instr_size],
+ instr, qp->cmdq.instr_size);
+ qp->ridq[head].sr = sr;
+ rte_smp_wmb();
+ rte_atomic16_inc(&qp->pending_count);
+}
+
+/*
+ * Consume one completed request: advance the consumer (tail) index and
+ * atomically drop the in-flight count.  Caller must have already fetched
+ * the softreq for this slot (nitrox_qp_get_softreq()) before calling,
+ * since the slot may be reused by the producer afterwards.
+ */
+static inline void
+nitrox_qp_dequeue(struct nitrox_qp *qp)
+{
+ qp->tail++;
+ rte_atomic16_dec(&qp->pending_count);
+}
+