Also, make sure to start the actual text at the margin.
=======================================================
+* **Added write combining store APIs.**
+
+ Added ``rte_write32_wc`` and ``rte_write32_wc_relaxed`` APIs
+ that enable write combining stores (depending on architecture).
+ The functions are provided as generic stubs and an
+ x86-specific implementation.
+
* **Added the FEC API, for a generic FEC query and config.**
Added the FEC API which provides functions for query FEC capabilities and
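
For illustration, a minimal usage sketch of the new calls (not part of this
patch), assuming a hypothetical driver that has already mapped a 32-bit
doorbell register as ``db_reg``:

    #include <rte_io.h>

    /* Ring a doorbell through the write-combining store; on platforms
     * without native WC support this falls back to rte_write32(). */
    static void
    ring_doorbell(volatile void *db_reg, uint32_t tail)
    {
        rte_write32_wc(tail, db_reg);
    }
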
rte_write64_relaxed(value, addr);
}
+static __rte_always_inline void
+rte_write32_wc(uint32_t value, volatile void *addr)
+{
+ rte_write32(value, addr);
+}
+
+static __rte_always_inline void
+rte_write32_wc_relaxed(uint32_t value, volatile void *addr)
+{
+ rte_write32_relaxed(value, addr);
+}
+
#ifdef __cplusplus
}
#endif
static inline void
rte_write64(uint64_t value, volatile void *addr);
+/**
+ * Write a 32-bit value to I/O device memory address addr using the write
+ * combining memory write protocol. Depending on the platform, write combining
+ * may not be available and/or may be treated as a hint, and the behavior may
+ * fall back to a regular store.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+__rte_experimental
+static inline void
+rte_write32_wc(uint32_t value, volatile void *addr);
+
+/**
+ * Write a 32-bit value to I/O device memory address addr using the write
+ * combining memory write protocol. Depending on the platform, write combining
+ * may not be available and/or may be treated as a hint, and the behavior may
+ * fall back to a regular store.
+ *
+ * The relaxed version does not have an additional I/O memory barrier, which
+ * is useful when accessing the device registers of integrated controllers
+ * that are implicitly strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+__rte_experimental
+static inline void
+rte_write32_wc_relaxed(uint32_t value, volatile void *addr);
+
#endif /* __DOXYGEN__ */
#ifndef RTE_OVERRIDE_IO_H
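
To make the two variants concrete, a sketch of one intended usage pattern with
a hypothetical register block and doorbell (the names below are illustrative,
not from the patch): the relaxed form leaves ordering to the caller, so a batch
of stores can rely on the single barrier issued by the final ordered store.

    #include <rte_io.h>

    /* Hypothetical: post a batch of 32-bit words to consecutive device
     * registers, then publish them with one ordered doorbell write. */
    static void
    post_batch(volatile uint32_t *regs, volatile void *doorbell,
               const uint32_t *vals, unsigned int n)
    {
        unsigned int i;

        /* No per-store barrier here. */
        for (i = 0; i < n; i++)
            rte_write32_wc_relaxed(vals[i], &regs[i]);

        /* The non-relaxed variant issues the write barrier itself. */
        rte_write32_wc(n, doorbell);
    }
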
rte_write64_relaxed(value, addr);
}
+#ifndef RTE_NATIVE_WRITE32_WC
+static __rte_always_inline void
+rte_write32_wc(uint32_t value, volatile void *addr)
+{
+ rte_write32(value, addr);
+}
+
+static __rte_always_inline void
+rte_write32_wc_relaxed(uint32_t value, volatile void *addr)
+{
+ rte_write32_relaxed(value, addr);
+}
+#endif /* RTE_NATIVE_WRITE32_WC */
+
#endif /* RTE_OVERRIDE_IO_H */
#endif /* _RTE_IO_H_ */
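
The ``RTE_NATIVE_WRITE32_WC`` guard above is the override hook: an architecture
that has a real write-combining store defines the macro before including the
generic header and supplies its own pair of functions, which is what the x86
hunk below does. A hypothetical sketch of such an override (placeholder store,
not a real port):

    /* hypothetical <arch>/include/rte_io.h fragment */
    #define RTE_NATIVE_WRITE32_WC
    #include "generic/rte_io.h"

    static __rte_always_inline void
    rte_write32_wc_relaxed(uint32_t value, volatile void *addr)
    {
        /* an architecture-specific WC store would go here */
        rte_write32_relaxed(value, addr);
    }

    static __rte_always_inline void
    rte_write32_wc(uint32_t value, volatile void *addr)
    {
        rte_wmb();
        rte_write32_wc_relaxed(value, addr);
    }
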
extern "C" {
#endif
+#include "rte_cpuflags.h"
+
+#define RTE_NATIVE_WRITE32_WC
#include "generic/rte_io.h"
+/**
+ * @internal
+ * MOVDIRI wrapper.
+ */
+static __rte_always_inline void
+__rte_x86_movdiri(uint32_t value, volatile void *addr)
+{
+ asm volatile(
+ /* MOVDIRI %eax, (%rdx), hand-encoded to avoid requiring assembler support */
+ ".byte 0x40, 0x0f, 0x38, 0xf9, 0x02"
+ :
+ : "a" (value), "d" (addr));
+}
+
+static __rte_always_inline void
+rte_write32_wc_relaxed(uint32_t value, volatile void *addr)
+{
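+ /* MOVDIRI availability: -1 = not yet probed, 0 = absent, 1 = present */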
+ static int _x86_movdiri_flag = -1;
+
+ if (_x86_movdiri_flag == 1) {
+ __rte_x86_movdiri(value, addr);
+ } else if (_x86_movdiri_flag == 0) {
+ rte_write32_relaxed(value, addr);
+ } else {
+ _x86_movdiri_flag =
+ (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MOVDIRI) > 0);
+ if (_x86_movdiri_flag == 1)
+ __rte_x86_movdiri(value, addr);
+ else
+ rte_write32_relaxed(value, addr);
+ }
+}
+
+static __rte_always_inline void
+rte_write32_wc(uint32_t value, volatile void *addr)
+{
+ rte_wmb();
+ rte_write32_wc_relaxed(value, addr);
+}
+
#ifdef __cplusplus
}
#endif