Diffstat (limited to 'libsanitizer/sanitizer_common/sanitizer_libc.cc')
-rw-r--r-- libsanitizer/sanitizer_common/sanitizer_libc.cc | 20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.cc b/libsanitizer/sanitizer_common/sanitizer_libc.cc
index 2a75e431b31..53c87555092 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.cc
@@ -14,6 +14,16 @@
namespace __sanitizer {
+// Make the compiler think that something is going on there.
+static inline void break_optimization(void *arg) {
+#if SANITIZER_WINDOWS
+ // FIXME: make sure this is actually enough.
+ __asm;
+#else
+ __asm__ __volatile__("" : : "r" (arg) : "memory");
+#endif
+}
+
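The empty inline asm above works as a compiler barrier: the "r"(arg) input forces arg to be materialized in a register, and the "memory" clobber makes the compiler assume arbitrary memory may have been read or written, so it cannot fold a surrounding store loop into a recognized libc pattern. A minimal standalone sketch of the same idiom (the names barrier and zero_words are illustrative, not part of the patch):

static inline void barrier(void *arg) {
  // Empty asm: "r"(arg) keeps 'arg' live, and the "memory" clobber
  // tells the compiler any memory may have changed across this point.
  __asm__ __volatile__("" : : "r"(arg) : "memory");
}

void zero_words(unsigned long *p, unsigned long n) {
  for (unsigned long i = 0; i < n; i++) {
    p[i] = 0;
    barrier(p);  // without this, Clang may rewrite the loop as a memset() call
  }
}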
s64 internal_atoll(const char *nptr) {
return internal_simple_strtoll(nptr, (char**)0, 10);
}
@@ -60,6 +70,16 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
return dest;
}
+// Semi-fast bzero for 16-aligned data. Still far from peak performance.
+void internal_bzero_aligned16(void *s, uptr n) {
+ struct S16 { u64 a, b; } ALIGNED(16);
+ CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
+ for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
+ p->a = p->b = 0;
+ break_optimization(0); // Make sure this does not become memset.
+ }
+}
+
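The CHECK_EQ above verifies both preconditions at once: OR-ing the pointer value with the size and masking with 15 yields zero only if both are multiples of 16. A hypothetical caller sketch (posix_memalign is assumed as the aligned allocator, and uptr is spelled out as unsigned long to keep the sketch standalone; neither is part of this patch):

#include <stdlib.h>

// Declaration matching the patch, with uptr written as unsigned long.
void internal_bzero_aligned16(void *s, unsigned long n);

void example(void) {
  void *buf = NULL;
  if (posix_memalign(&buf, 16, 256) == 0) {  // 16-byte-aligned allocation
    internal_bzero_aligned16(buf, 256);      // ok: (ptr | 256) & 15 == 0
    // internal_bzero_aligned16(buf, 250);   // would abort: 250 & 15 != 0
    free(buf);
  }
}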
void *internal_memset(void* s, int c, uptr n) {
// The next line prevents Clang from making a call to memset() instead of the
// loop below.
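A sketch of the idiom the comment describes (assumed, not the verbatim continuation of this file): writing through a volatile pointer, since volatile stores may not be elided, merged, or replaced by a library call, so the loop survives as written.

void *memset_sketch(void *s, int c, unsigned long n) {
  volatile char *t = (volatile char *)s;  // volatile defeats memset() pattern-matching
  for (unsigned long i = 0; i < n; ++i)
    t[i] = (char)c;
  return s;
}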