+/* Return true when enough ops are pending to form a full burst. */
+static inline bool
+eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
+{
+	return bufp->count >= BATCH_SIZE;
+}
+
+/* Return true when the buffer has room for a full burst of ops. */
+static inline bool
+eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
+{
+	return (bufp->size - bufp->count) >= BATCH_SIZE;
+}
+
+/* Release the backing op array; the buffer must not be used afterwards. */
+static inline void
+eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
+{
+	rte_free(bufp->op_buffer);
+}
+
+/* Allocate a zeroed array of sz op pointers. head, tail and count are
+ * assumed to start at zero (e.g. the containing structure is zeroed).
+ */
+static inline int
+eca_circular_buffer_init(const char *name,
+			 struct crypto_ops_circular_buffer *bufp,
+			 uint16_t sz)
+{
+	bufp->op_buffer = rte_zmalloc(name,
+				      sizeof(struct rte_crypto_op *) * sz,
+				      0);
+	if (bufp->op_buffer == NULL)
+		return -ENOMEM;
+
+	bufp->size = sz;
+	return 0;
+}
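+
+/*
+ * Illustrative pairing of init and free (a sketch, not part of this
+ * patch; the "cbuf" variable and the 2 * BATCH_SIZE sizing are
+ * hypothetical):
+ *
+ *	struct crypto_ops_circular_buffer cbuf = {0};
+ *
+ *	if (eca_circular_buffer_init("eca_cbuf", &cbuf, 2 * BATCH_SIZE))
+ *		return -ENOMEM;
+ *	...
+ *	eca_circular_buffer_free(&cbuf);
+ */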
+
+/* Append one op at the tail. No overflow check is done here; the caller
+ * must ensure free space is available, e.g. via
+ * eca_circular_buffer_space_for_batch().
+ */
+static inline int
+eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
+			struct rte_crypto_op *op)
+{
+	uint16_t *tailp = &bufp->tail;
+
+	bufp->op_buffer[*tailp] = op;
+	/* circular buffer, wrap around at the end */
+	*tailp = (*tailp + 1) % bufp->size;
+	bufp->count++;
+
+	return 0;
+}
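+
+/*
+ * A minimal caller-side sketch (illustrative only, "cbuf" is a
+ * hypothetical name): gate each add on the space predicate above.
+ *
+ *	if (eca_circular_buffer_space_for_batch(&cbuf))
+ *		eca_circular_buffer_add(&cbuf, op);
+ */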
+
+/* Enqueue buffered ops to the given cryptodev queue pair. Returns 0 when
+ * every op selected for this call was accepted, -1 on a short enqueue.
+ */
+static inline int
+eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
+				  uint8_t cdev_id, uint16_t qp_id,
+				  uint16_t *nb_ops_flushed)
+{
+	uint16_t n = 0;
+	uint16_t *headp = &bufp->head;
+	uint16_t *tailp = &bufp->tail;
+	struct rte_crypto_op **ops = bufp->op_buffer;
+
+	if (*tailp > *headp)
+		/* Ops are stored contiguously from head to tail. */
+		n = *tailp - *headp;
+	else if (*tailp < *headp)
+		/* Tail has wrapped around; flush from head to the end of
+		 * the buffer now, the remainder on the next call.
+		 */
+		n = bufp->size - *headp;
+	else if (bufp->count != 0)
+		/* head == tail with pending ops means the buffer is full
+		 * (tail rolled over onto head), not empty; flush from head
+		 * to the end of the buffer, the remainder on the next call.
+		 */
+		n = bufp->size - *headp;
+	else {
+		*nb_ops_flushed = 0;
+		return 0; /* buffer empty */
+	}
+
+	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
+						      &ops[*headp], n);
+	bufp->count -= *nb_ops_flushed;
+	if (!bufp->count) {
+		*headp = 0;
+		*tailp = 0;
+	} else
+		*headp = (*headp + *nb_ops_flushed) % bufp->size;
+
+	return *nb_ops_flushed == n ? 0 : -1;
+}
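+
+/*
+ * A wrapped or full buffer drains in at most two successful calls: the
+ * first flushes the contiguous region from head to the end of the array,
+ * the next flushes the remainder. Sketch of a caller-side drain loop
+ * ("cbuf", "cdev_id" and "qp_id" stand in for adapter state):
+ *
+ *	uint16_t nb_flushed;
+ *
+ *	while (cbuf.count != 0 &&
+ *	       eca_circular_buffer_flush_to_cdev(&cbuf, cdev_id, qp_id,
+ *						 &nb_flushed) == 0)
+ *		;
+ */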
+