/**
* This function is not supported on ARM.
*/
-void
+int
rte_power_monitor(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz)
{
	RTE_SET_USED(p);
	RTE_SET_USED(expected_value);
	RTE_SET_USED(value_mask);
RTE_SET_USED(tsc_timestamp);
RTE_SET_USED(data_sz);
+
+ return -ENOTSUP;
}
/**
* This function is not supported on ARM.
*/
-void
+int
rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz, rte_spinlock_t *lck)
{
	RTE_SET_USED(p);
	RTE_SET_USED(expected_value);
	RTE_SET_USED(value_mask);
	RTE_SET_USED(tsc_timestamp);
RTE_SET_USED(lck);
RTE_SET_USED(data_sz);
+
+ return -ENOTSUP;
}
/**
* This function is not supported on ARM.
*/
-void
+int
rte_power_pause(const uint64_t tsc_timestamp)
{
RTE_SET_USED(tsc_timestamp);
+
+ return -ENOTSUP;
}
*
* @warning It is the responsibility of the user to check if this function is
* supported at runtime using the `rte_cpu_get_intrinsics_support()` API call.
- * Failing to do so may result in an illegal CPU instruction error.
*
* @param p
* Address to monitor for changes.
* @param data_sz
* Data size (in bytes) that will be used to compare expected value with the
- * memory address. Can be 1, 2, 4 or 8. Supplying any other value will lead
- * to undefined result.
+ * memory address. Can be 1, 2, 4 or 8. Supplying any other value will make
+ * the call fail with -EINVAL.
+ *
+ * @return
+ * 0 on success
+ * -EINVAL on invalid parameters
+ * -ENOTSUP if unsupported
*/
__rte_experimental
-void rte_power_monitor(const volatile void *p,
+int rte_power_monitor(const volatile void *p,
const uint64_t expected_value, const uint64_t value_mask,
const uint64_t tsc_timestamp, const uint8_t data_sz);
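
/*
 * Illustrative usage sketch, not part of this patch: with the new int return
 * type, a caller can react to the error codes instead of risking an illegal
 * instruction. `wake_addr`, the 8-byte flag protocol and the busy-poll
 * fallback below are hypothetical; assumes rte_cycles.h and rte_pause.h.
 */
static inline void
wait_for_wakeup(volatile uint64_t *wake_addr, uint64_t tsc_deadline)
{
	/* sleep until *wake_addr is written to, or the deadline passes */
	if (rte_power_monitor(wake_addr, 1, UINT64_MAX, tsc_deadline,
			sizeof(uint64_t)) == -ENOTSUP) {
		/* no UMONITOR/UMWAIT on this CPU - busy-poll instead */
		while (*wake_addr != 1 && rte_rdtsc() < tsc_deadline)
			rte_pause();
	}
}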
*
* @warning It is the responsibility of the user to check if this function is
* supported at runtime using the `rte_cpu_get_intrinsics_support()` API call.
- * Failing to do so may result in an illegal CPU instruction error.
*
* @param p
* Address to monitor for changes.
* @param lck
* A spinlock that must be locked before entering the function, will be
* unlocked while the CPU is sleeping, and will be locked again once the CPU
* wakes up.
+ *
+ * @return
+ * 0 on success
+ * -EINVAL on invalid parameters
+ * -ENOTSUP if unsupported
*/
__rte_experimental
-void rte_power_monitor_sync(const volatile void *p,
+int rte_power_monitor_sync(const volatile void *p,
const uint64_t expected_value, const uint64_t value_mask,
const uint64_t tsc_timestamp, const uint8_t data_sz,
rte_spinlock_t *lck);
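
/*
 * Illustrative sketch, not part of this patch: the spinlock passed to
 * rte_power_monitor_sync() must be held on entry; it is released while the
 * core sleeps and re-acquired on wakeup, so the caller still owns it when
 * the call returns, whatever the return code. Names are hypothetical.
 */
static inline int
wait_for_wakeup_locked(volatile uint64_t *wake_addr, rte_spinlock_t *lck,
		uint64_t tsc_deadline)
{
	int ret;

	rte_spinlock_lock(lck);
	ret = rte_power_monitor_sync(wake_addr, 1, UINT64_MAX, tsc_deadline,
			sizeof(uint64_t), lck);
	/* the lock is held again here, on success and on error alike */
	rte_spinlock_unlock(lck);

	return ret;
}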
*
* @warning It is the responsibility of the user to check if this function is
* supported at runtime using the `rte_cpu_get_intrinsics_support()` API call.
- * Failing to do so may result in an illegal CPU instruction error.
*
* @param tsc_timestamp
* Maximum TSC timestamp to wait for. Note that the wait behavior is
* architecture-dependent.
+ *
+ * @return
+ * 0 on success
+ * -EINVAL on invalid parameters
+ * -ENOTSUP if unsupported
*/
__rte_experimental
-void rte_power_pause(const uint64_t tsc_timestamp);
+int rte_power_pause(const uint64_t tsc_timestamp);
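
/*
 * Illustrative sketch, not part of this patch: a power-friendly bounded delay
 * that degrades to plain busy-waiting when TPAUSE is unavailable. Assumes
 * rte_cycles.h and rte_pause.h; `power_delay` is a hypothetical helper.
 */
static inline void
power_delay(const uint64_t delay_cycles)
{
	const uint64_t deadline = rte_rdtsc() + delay_cycles;

	if (rte_power_pause(deadline) == -ENOTSUP) {
		/* TPAUSE not available - spin until the deadline */
		while (rte_rdtsc() < deadline)
			rte_pause();
	}
}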
#endif /* _RTE_POWER_INTRINSIC_H_ */
/**
* This function is not supported on PPC64.
*/
-void
+int
rte_power_monitor(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz)
{
	RTE_SET_USED(p);
	RTE_SET_USED(expected_value);
	RTE_SET_USED(value_mask);
RTE_SET_USED(tsc_timestamp);
RTE_SET_USED(data_sz);
+
+ return -ENOTSUP;
}
/**
* This function is not supported on PPC64.
*/
-void
+int
rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz, rte_spinlock_t *lck)
{
	RTE_SET_USED(p);
	RTE_SET_USED(expected_value);
	RTE_SET_USED(value_mask);
	RTE_SET_USED(tsc_timestamp);
RTE_SET_USED(lck);
RTE_SET_USED(data_sz);
+
+ return -ENOTSUP;
}
/**
* This function is not supported on PPC64.
*/
-void
+int
rte_power_pause(const uint64_t tsc_timestamp)
{
RTE_SET_USED(tsc_timestamp);
+
+ return -ENOTSUP;
}
#include "rte_power_intrinsics.h"
+static bool wait_supported;
+
static inline uint64_t
__get_umwait_val(const volatile void *p, const uint8_t sz)
{
	switch (sz) {
	case sizeof(uint8_t):
		return *(const volatile uint8_t *)p;
	case sizeof(uint16_t):
		return *(const volatile uint16_t *)p;
	case sizeof(uint32_t):
		return *(const volatile uint32_t *)p;
	case sizeof(uint64_t):
return *(const volatile uint64_t *)p;
default:
- /* this is an intrinsic, so we can't have any error handling */
+ /* shouldn't happen */
RTE_ASSERT(0);
return 0;
}
}
+static inline int
+__check_val_size(const uint8_t sz)
+{
+ switch (sz) {
+ case sizeof(uint8_t): /* fall-through */
+ case sizeof(uint16_t): /* fall-through */
+ case sizeof(uint32_t): /* fall-through */
+ case sizeof(uint64_t): /* fall-through */
+ return 0;
+ default:
+ /* unexpected size */
+ return -1;
+ }
+}
+
/**
* This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state.
* For more information about usage of these instructions, please refer to
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual.
*/
-void
+int
rte_power_monitor(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz)
{
const uint32_t tsc_l = (uint32_t)tsc_timestamp;
const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);
+
+ /* prevent user from running this instruction if it's not supported */
+ if (!wait_supported)
+ return -ENOTSUP;
+
+ if (__check_val_size(data_sz) < 0)
+ return -EINVAL;
+
/*
* we're using raw byte codes for now as only the newest compiler
	 * versions support this instruction natively.
	 */

	/* set address for UMONITOR */
	asm volatile(".byte 0xf3, 0x0f, 0xae, 0xf1;"
			:
			: "D"(p));

	if (value_mask) {
		const uint64_t cur_value = __get_umwait_val(p, data_sz);
		const uint64_t masked = cur_value & value_mask;

		/* if the masked value is already matching, abort */
if (masked == expected_value)
- return;
+ return 0;
}
/* execute UMWAIT */
asm volatile(".byte 0xf2, 0x0f, 0xae, 0xf7;"
: /* ignore rflags */
: "D"(0), /* enter C0.2 */
"a"(tsc_l), "d"(tsc_h));
+
+ return 0;
}
/**
* This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state.
* For more information about usage of these instructions, please refer to
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual.
*/
-void
+int
rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz, rte_spinlock_t *lck)
{
const uint32_t tsc_l = (uint32_t)tsc_timestamp;
const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);
+
+ /* prevent user from running this instruction if it's not supported */
+ if (!wait_supported)
+ return -ENOTSUP;
+
+ if (__check_val_size(data_sz) < 0)
+ return -EINVAL;
+
/*
* we're using raw byte codes for now as only the newest compiler
	 * versions support this instruction natively.
	 */

	/* set address for UMONITOR */
	asm volatile(".byte 0xf3, 0x0f, 0xae, 0xf1;"
			:
			: "D"(p));

	if (value_mask) {
		const uint64_t cur_value = __get_umwait_val(p, data_sz);
		const uint64_t masked = cur_value & value_mask;

		/* if the masked value is already matching, abort */
if (masked == expected_value)
- return;
+ return 0;
}
rte_spinlock_unlock(lck);
"a"(tsc_l), "d"(tsc_h));
rte_spinlock_lock(lck);
+
+ return 0;
}
/**
* This function uses the TPAUSE instruction and will enter C0.2 state. For more
* information about usage of this instruction, please refer to Intel(R) 64 and
* IA-32 Architectures Software Developer's Manual.
*/
-void
+int
rte_power_pause(const uint64_t tsc_timestamp)
{
const uint32_t tsc_l = (uint32_t)tsc_timestamp;
	const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);

+ /* prevent user from running this instruction if it's not supported */
+ if (!wait_supported)
+ return -ENOTSUP;
+
/* execute TPAUSE */
asm volatile(".byte 0x66, 0x0f, 0xae, 0xf7;"
: /* ignore rflags */
: "D"(0), /* enter C0.2 */
"a"(tsc_l), "d"(tsc_h));
+
+ return 0;
+}
+
+RTE_INIT(rte_power_intrinsics_init) {
+ struct rte_cpu_intrinsics i;
+
+ rte_cpu_get_intrinsics_support(&i);
+
+ if (i.power_monitor && i.power_pause)
+ wait_supported = 1;
+}
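
/*
 * Illustrative sketch, not part of this patch: the same runtime probe the
 * constructor above performs is available to applications, e.g. to choose a
 * wait strategy once at startup. `app_power_intrinsics_supported` is a
 * hypothetical helper.
 */
static bool
app_power_intrinsics_supported(void)
{
	struct rte_cpu_intrinsics intr;

	rte_cpu_get_intrinsics_support(&intr);

	return intr.power_monitor && intr.power_pause;
}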