 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
 		unsigned int max_ops)
 {
-	while (max_ops) {
+	unsigned int ops_left = max_ops;
+
+	while (ops_left > 0) {
 		unsigned int e_cnt, d_cnt;
 
-		e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
-		max_ops -= RTE_MIN(max_ops, e_cnt);
+		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
+		ops_left -= RTE_MIN(ops_left, e_cnt);
 
-		d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
-		max_ops -= RTE_MIN(max_ops, d_cnt);
+		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
+		ops_left -= RTE_MIN(ops_left, d_cnt);
 
 		if (e_cnt == 0 && d_cnt == 0)
 			break;
 	}
+
+	if (ops_left == max_ops)
+		rte_event_maintain(adapter->eventdev_id,
+				   adapter->event_port_id, 0);
 }
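With this change the crypto adapter tracks how many ops it actually moved: if a whole run makes no progress (ops_left == max_ops), it calls rte_event_maintain() so that event devices which are not maintenance-free, such as the distributed software (dsw) eventdev, still get to run their internal housekeeping on the adapter's event port. Whether a device needs such calls can be queried up front; a minimal sketch, assuming dev_id names an already-configured event device (the helper name is hypothetical):

#include <stdbool.h>

#include <rte_eventdev.h>

/* Hypothetical helper: true if ports on this device must be
 * periodically serviced with rte_event_maintain(), i.e. the device
 * does not advertise RTE_EVENT_DEV_CAP_MAINTENANCE_FREE.
 */
static bool
port_requires_maintenance(uint8_t dev_id)
{
	struct rte_event_dev_info info;

	if (rte_event_dev_info_get(dev_id, &info) != 0)
		return true; /* be conservative on error */

	return !(info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE);
}

The receive-side counterpart, rxa_eth_rx() in the Ethernet Rx adapter, gets the same treatment: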
 static inline uint32_t
 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
 	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
 	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
 	   struct rte_event_eth_rx_adapter_stats *stats)
 {
 	struct rte_mbuf *mbufs[BATCH_SIZE];
 	uint16_t n;
 	uint32_t nb_rx = 0;
+	uint32_t nb_flushed = 0;
 
 	if (rxq_empty)
 		*rxq_empty = 0;
 	/* Don't do a batch dequeue from the rx queue if there isn't
 	 * enough space in the enqueue buffer.
 	 */
 	while (rxa_pkt_buf_available(buf)) {
 		if (buf->count >= BATCH_SIZE)
-			rxa_flush_event_buffer(rx_adapter, buf, stats);
+			nb_flushed +=
+				rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 		stats->rx_poll_count++;
 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
 		if (unlikely(!n)) {
 			if (rxq_empty)
 				*rxq_empty = 1;
 			break;
 		}
 		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
 				 stats);
 		nb_rx += n;
 		if (rx_count + nb_rx > max_rx)
 			break;
 	}
 
 	if (buf->count > 0)
-		rxa_flush_event_buffer(rx_adapter, buf, stats);
+		nb_flushed += rxa_flush_event_buffer(rx_adapter, buf, stats);
 
 	stats->rx_packets += nb_rx;
+	if (nb_flushed == 0)
+		rte_event_maintain(rx_adapter->eventdev_id,
+				   rx_adapter->event_port_id, 0);
+
 	return nb_rx;
 }
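The Rx adapter follows the same rule: rxa_flush_event_buffer() now reports how many events it enqueued, and a poll iteration that flushed nothing ends with an rte_event_maintain() call on the adapter's event port. Application-owned ports need the same treatment. A minimal sketch of such a dispatch loop, assuming dev_id and port_id refer to a started event device and a port serviced by this lcore, with handle_event() as a hypothetical application handler:

#include <rte_eventdev.h>

/* Hypothetical application handler; real event processing goes here. */
static void
handle_event(struct rte_event *ev)
{
	(void)ev;
}

static void
dispatch_loop(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev;

	for (;;) {
		/* Non-blocking dequeue of a single event. */
		uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
						     &ev, 1, 0);

		if (n == 0) {
			/* Idle iteration: give the event device a chance
			 * to do internal processing. This is a no-op on
			 * maintenance-free devices.
			 */
			rte_event_maintain(dev_id, port_id, 0);
			continue;
		}

		handle_event(&ev);
	}
}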