-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ * Copyright(c) 2019 Arm Limited
*/
#ifndef _RTE_PAUSE_ARM64_H_
#define _RTE_PAUSE_ARM64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
+
+#ifdef RTE_ARM_USE_WFE
+#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
+#endif
+
#include "generic/rte_pause.h"
static inline void rte_pause(void)
{
	asm volatile("yield" ::: "memory");
}
+#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
+
+/* Set the local event register so that the first WFE does not stall. */
+#define __SEVL() { asm volatile("sevl" : : : "memory"); }
+
+/* Put the processor into the low-power WFE (Wait For Event) state. */
+#define __WFE() { asm volatile("wfe" : : : "memory"); }
+
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+ int memorder)
+{
+ uint16_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ /*
+ * Atomic exclusive load from addr: it returns the 16-bit content of
+ * *addr while making it 'monitored'. When it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_16(src, dst, memorder) { \
+ if (memorder == __ATOMIC_RELAXED) { \
+ asm volatile("ldxrh %w[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } else { \
+ asm volatile("ldaxrh %w[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } }
+
+ __LOAD_EXC_16(addr, value, memorder)
+ if (value != expected) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_16(addr, value, memorder)
+ } while (value != expected);
+ }
+#undef __LOAD_EXC_16
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
+ int memorder)
+{
+ uint32_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ /*
+ * Atomic exclusive load from addr: it returns the 32-bit content of
+ * *addr while making it 'monitored'. When it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_32(src, dst, memorder) { \
+ if (memorder == __ATOMIC_RELAXED) { \
+ asm volatile("ldxr %w[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } else { \
+ asm volatile("ldaxr %w[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } }
+
+ __LOAD_EXC_32(addr, value, memorder)
+ if (value != expected) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_32(addr, value, memorder)
+ } while (value != expected);
+ }
+#undef __LOAD_EXC_32
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
+ int memorder)
+{
+ uint64_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ /*
+ * Atomic exclusive load from addr: it returns the 64-bit content of
+ * *addr while making it 'monitored'. When it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_64(src, dst, memorder) { \
+ if (memorder == __ATOMIC_RELAXED) { \
+ asm volatile("ldxr %x[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } else { \
+ asm volatile("ldaxr %x[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } }
+
+ __LOAD_EXC_64(addr, value, memorder)
+ if (value != expected) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_64(addr, value, memorder)
+ } while (value != expected);
+ }
+}
+#undef __LOAD_EXC_64
+
+#undef __SEVL
+#undef __WFE
+
+#endif
+
#ifdef __cplusplus
}
#endif

#endif /* _RTE_PAUSE_ARM64_H_ */
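
Below is a minimal usage sketch (not part of the patch) of how the new
rte_wait_until_equal_32() API is intended to be called; the shared flag and
the waiter/signaller function names are illustrative assumptions only:

/* Illustrative sketch: one thread parks in WFE (when RTE_ARM_USE_WFE is
 * enabled) until another thread publishes a 32-bit flag.
 */
#include <stdint.h>
#include <rte_pause.h>

static volatile uint32_t sync_flag;	/* hypothetical shared flag */

static void
waiter(void)
{
	/* Sleeps in WFE instead of busy-polling until *addr == expected;
	 * acquire ordering makes data written before the flag visible here.
	 */
	rte_wait_until_equal_32(&sync_flag, 1, __ATOMIC_ACQUIRE);
}

static void
signaller(void)
{
	/* The store clears the waiter's exclusive monitor and implicitly
	 * generates the event that wakes it from WFE.
	 */
	__atomic_store_n(&sync_flag, 1, __ATOMIC_RELEASE);
}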