/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#ifndef _RTE_POWER_INTRINSIC_H_
#define _RTE_POWER_INTRINSIC_H_

#include <rte_compat.h>
#include <rte_spinlock.h>
/**
 * @file
 * Advanced power management operations.
 *
 * This file defines APIs for advanced power management,
 * which are architecture-dependent.
 */
/**
 * Monitoring condition used by the power-management monitor calls.
 *
 * Describes the memory location to watch and, optionally, a value/mask
 * pair used to decide whether entering the optimized power state should
 * be aborted.
 */
struct rte_power_monitor_cond {
	volatile void *addr;  /**< Address to monitor for changes */
	uint64_t val;         /**< Before attempting the monitoring, the address
	                       *   may be read and compared against this value.
	                       */
	uint64_t mask;        /**< 64-bit mask to extract current value from addr */
	uint8_t data_sz;      /**< Data size (in bytes) that will be used to compare
	                       *   expected value with the memory address. Can be 1,
	                       *   2, 4, or 8. Supplying any other value will lead to
	                       *   undefined result. */
};
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Monitor specific address for changes. This will cause the CPU to enter an
 * architecture-defined optimized power state until either the specified
 * memory address is written to, a certain TSC timestamp is reached, or other
 * reasons cause the CPU to wake up.
 *
 * Additionally, an `expected` 64-bit value and 64-bit mask are provided. If
 * mask is non-zero, the current value pointed to by the `p` pointer will be
 * checked against the expected value, and if they match, the entering of
 * optimized power state may be aborted.
 *
 * @warning It is the responsibility of the user to check if this function is
 *   supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
 *
 * @param pmc
 *   The monitoring condition structure.
 * @param tsc_timestamp
 *   Maximum TSC timestamp to wait for. Note that the wait behavior is
 *   architecture-dependent.
 *
 * @return
 *   0 on success
 *   -EINVAL on invalid parameters
 *   -ENOTSUP if unsupported
 */
int rte_power_monitor(const struct rte_power_monitor_cond *pmc,
		const uint64_t tsc_timestamp);
66 * @b EXPERIMENTAL: this API may change without prior notice
68 * Monitor specific address for changes. This will cause the CPU to enter an
69 * architecture-defined optimized power state until either the specified
70 * memory address is written to, a certain TSC timestamp is reached, or other
71 * reasons cause the CPU to wake up.
73 * Additionally, an `expected` 64-bit value and 64-bit mask are provided. If
74 * mask is non-zero, the current value pointed to by the `p` pointer will be
75 * checked against the expected value, and if they match, the entering of
76 * optimized power state may be aborted.
78 * This call will also lock a spinlock on entering sleep, and release it on
81 * @warning It is responsibility of the user to check if this function is
82 * supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
85 * The monitoring condition structure.
86 * @param tsc_timestamp
87 * Maximum TSC timestamp to wait for. Note that the wait behavior is
88 * architecture-dependent.
90 * A spinlock that must be locked before entering the function, will be
91 * unlocked while the CPU is sleeping, and will be locked again once the CPU
96 * -EINVAL on invalid parameters
97 * -ENOTSUP if unsupported
100 int rte_power_monitor_sync(const struct rte_power_monitor_cond *pmc,
101 const uint64_t tsc_timestamp, rte_spinlock_t *lck);
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Enter an architecture-defined optimized power state until a certain TSC
 * timestamp is reached.
 *
 * @warning It is the responsibility of the user to check if this function is
 *   supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
 *
 * @param tsc_timestamp
 *   Maximum TSC timestamp to wait for. Note that the wait behavior is
 *   architecture-dependent.
 *
 * @return
 *   0 on success
 *   -EINVAL on invalid parameters
 *   -ENOTSUP if unsupported
 */
int rte_power_pause(const uint64_t tsc_timestamp);
#endif /* _RTE_POWER_INTRINSIC_H_ */