* checked against the expected value, and if they match, the entering of
* optimized power state may be aborted.
*
+ * @warning It is the responsibility of the user to check if this function is
+ * supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
+ * Failing to do so may result in an illegal CPU instruction error.
+ *
* @param p
* Address to monitor for changes.
* @param expected_value
* to undefined result.
*/
__rte_experimental
-static inline void rte_power_monitor(const volatile void *p,
+void rte_power_monitor(const volatile void *p,
const uint64_t expected_value, const uint64_t value_mask,
const uint64_t tsc_timestamp, const uint8_t data_sz);
* This call will also lock a spinlock on entering sleep, and release it on
* waking up the CPU.
*
+ * @warning It is the responsibility of the user to check if this function is
+ * supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
+ * Failing to do so may result in an illegal CPU instruction error.
+ *
* @param p
* Address to monitor for changes.
* @param expected_value
* wakes up.
*/
__rte_experimental
-static inline void rte_power_monitor_sync(const volatile void *p,
+void rte_power_monitor_sync(const volatile void *p,
const uint64_t expected_value, const uint64_t value_mask,
const uint64_t tsc_timestamp, const uint8_t data_sz,
rte_spinlock_t *lck);
* Enter an architecture-defined optimized power state until a certain TSC
* timestamp is reached.
*
+ * @warning It is the responsibility of the user to check if this function is
+ * supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
+ * Failing to do so may result in an illegal CPU instruction error.
+ *
* @param tsc_timestamp
* Maximum TSC timestamp to wait for. Note that the wait behavior is
* architecture-dependent.
*/
__rte_experimental
-static inline void rte_power_pause(const uint64_t tsc_timestamp);
+void rte_power_pause(const uint64_t tsc_timestamp);
#endif /* _RTE_POWER_INTRINSIC_H_ */