/* The credit window is one high water mark of QEs */
qm_port->dir_pushcount_at_credit_expiry = 0;
qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
- qm_port->cq_depth = cfg.cq_depth;
/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
* the effective depth is smaller.
*/
/* Interrupts not supported by PF PMD */
return 1;
} else if (dlb->umwait_allowed) {
+ struct rte_power_monitor_cond pmc;
volatile struct dlb_dequeue_qe *cq_base;
union {
uint64_t raw_qe[2];
else
expected_value = 0;
- rte_power_monitor(monitor_addr, expected_value,
- qe_mask.raw_qe[1], timeout + start_ticks,
- sizeof(uint64_t));
+ pmc.addr = monitor_addr;
+ pmc.val = expected_value;
+ pmc.mask = qe_mask.raw_qe[1];
+ pmc.size = sizeof(uint64_t);
+
+ rte_power_monitor(&pmc, timeout + start_ticks);
DLB_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
} else {
/* This function intentionally left blank. */
}
+/* Convert a dequeue timeout from nanoseconds to device timer ticks
+ * (eventdev .timeout_ticks op). Returns 0; never fails.
+ *
+ * NOTE(review): cycles_per_ns is the integer truncation of
+ * rte_get_timer_hz() / 1E9. On a 2.7 GHz timer this yields 2 (not 2.7,
+ * ~26% short), and on a sub-1 GHz timer it yields 0, which would make
+ * every computed timeout zero ticks. Confirm the deployment targets
+ * guarantee a >= 1 GHz timer, or compute
+ * ns * rte_get_timer_hz() / 1E9 in floating point instead.
+ */
+static int
+dlb_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+			   uint64_t *timeout_ticks)
+{
+	/* dev is unused; required by the eventdev ops signature. */
+	RTE_SET_USED(dev);
+	uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
+
+	*timeout_ticks = ns * cycles_per_ns;
+
+	return 0;
+}
+
void
dlb_entry_points_init(struct rte_eventdev *dev)
{
.port_unlink = dlb_eventdev_port_unlink,
.port_unlinks_in_progress =
dlb_eventdev_port_unlinks_in_progress,
+ .timeout_ticks = dlb_eventdev_timeout_ticks,
.dump = dlb_eventdev_dump,
.xstats_get = dlb_eventdev_xstats_get,
.xstats_get_names = dlb_eventdev_xstats_get_names,