/* NOTE(review): this is a unified-diff fragment (lines prefixed '+' are patch
 * additions) of the tail of the software-eventdev schedule function. Several
 * context lines are missing from this view — e.g. the enqueue call whose
 * argument list ends at "&sw->cq_ring_space[i]" below, and the right-hand
 * side of the truncated "else" arm. Code is left byte-identical; only
 * comments are added.
 */
/* Idle-iteration statistics: bump the counters when this scheduler pass
 * moved zero events in (IQ) or out (CQ), respectively.
 */
sw->sched_no_iq_enqueues += (in_pkts_total == 0);
sw->sched_no_cq_enqueues += (out_pkts_total == 0);
/* Patch additions: record whether ANY event moved this iteration (0/1
 * flag), exposed via sched_progress_last_iter — presumably consumed by the
 * driver's xstats code elsewhere in this file; TODO confirm.
 */
+ uint64_t work_done = (in_pkts_total + out_pkts_total) != 0;
+ sw->sched_progress_last_iter = work_done;
+
/* Bitmask accumulator: bit i is set when port i had CQ events flushed to
 * its worker ring this iteration (see the "|=" in the loop body below).
 */
+ uint64_t cqs_scheds_last_iter = 0;
+
/* push all the internal buffered QEs in port->cq_ring to the
 * worker cores: aka, do the ring transfers batched.
 */
/* NOTE(review): the call whose argument list ends here is missing from this
 * fragment — context lines above it were not captured.
 */
&sw->cq_ring_space[i]);
port->cq_buf_count = 0;
no_enq = 0;
/* Mark port i as having been scheduled events this iteration. Safe only
 * while i < 64; the >= 64 case is papered over after the loop below.
 */
+ cqs_scheds_last_iter |= (1ULL << i);
} else {
/* NOTE(review): truncated else-arm — the subtraction's second operand and
 * the surrounding statements are missing context lines in this fragment.
 */
sw->cq_ring_space[i] =
rte_event_ring_free_count(worker) -
sw->sched_min_burst = sw->sched_min_burst_size;
}
+ /* Provide stats on what eventdev ports were scheduled to this
+ * iteration. If more than 64 ports are active, always report that
+ * all Eventdev ports have been scheduled events.
+ */
+ sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
+ if (unlikely(sw->port_count >= 64))
+ sw->sched_last_iter_bitmask = UINT64_MAX;
}