From 1aba3a8367f8e9a6f12e409f170008d930f87855 Mon Sep 17 00:00:00 2001
From: Martin Storsjö
Date: Thu, 15 Jul 2021 12:26:40 +0300
Subject: libuv: Fix building with mingw toolchains for ARM/AArch64

This is a backport of f9ad802fa5dd5afe6730f8e00cfdbf98f1d7a969 from the
v1.x branch from upstream libuv:

    mingw: fix building for ARM/AArch64

    Don't use x86 inline assembly in these cases, but fall back to
    __sync_fetch_and_or, similar to _InterlockedOr8 in the MSVC case.

    This corresponds to what is done in src/unix/atomic-ops.h, where
    ARM/AArch64 cases end up implementing cmpxchgi with
    __sync_val_compare_and_swap.

    PR-URL: https://github.com/libuv/libuv/pull/3236
    Reviewed-By: Jameson Nash
---
 Utilities/cmlibuv/src/win/atomicops-inl.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/Utilities/cmlibuv/src/win/atomicops-inl.h b/Utilities/cmlibuv/src/win/atomicops-inl.h
index 52713cf305..2f984c6db0 100644
--- a/Utilities/cmlibuv/src/win/atomicops-inl.h
+++ b/Utilities/cmlibuv/src/win/atomicops-inl.h
@@ -39,10 +39,11 @@ static char INLINE uv__atomic_exchange_set(char volatile* target) {
   return _InterlockedOr8(target, 1);
 }
 
-#else /* GCC */
+#else /* GCC, Clang in mingw mode */
 
-/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
 static inline char uv__atomic_exchange_set(char volatile* target) {
+#if defined(__i386__) || defined(__x86_64__)
+  /* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
   const char one = 1;
   char old_value;
   __asm__ __volatile__ ("lock xchgb %0, %1\n\t"
@@ -50,6 +51,9 @@ static inline char uv__atomic_exchange_set(char volatile* target) {
                         : "0"(one), "m"(*target)
                         : "memory");
   return old_value;
+#else
+  return __sync_fetch_and_or(target, 1);
+#endif
 }
 
 #endif
-- 
cgit v1.2.1
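
Note on the fallback (not part of the patch): __sync_fetch_and_or(target, 1)
atomically sets the low bit of *target and returns the value the byte held
before the operation, which is the same "return the old value after making it
non-zero" contract that the x86 "lock xchgb" path and the MSVC _InterlockedOr8
path provide. The standalone sketch below illustrates that semantics; the
names flag and try_set are made up for the demo, and it assumes a gcc or
clang compiler that still accepts the legacy __sync builtins.

    /* Standalone demo of the old-value semantics of __sync_fetch_and_or,
     * as used by the non-x86 mingw fallback in this patch.
     * Build with e.g. gcc or an aarch64 mingw cross compiler.
     */
    #include <stdio.h>

    static volatile char flag = 0;

    /* Mirrors the shape of uv__atomic_exchange_set(): atomically makes the
     * byte non-zero and returns its previous value. */
    static char try_set(volatile char* target) {
      return __sync_fetch_and_or(target, 1);
    }

    int main(void) {
      printf("first call:  %d\n", (int) try_set(&flag)); /* 0: flag was clear */
      printf("second call: %d\n", (int) try_set(&flag)); /* 1: flag already set */
      return 0;
    }

The only behavioral difference from "lock xchgb" is that the fallback ORs 1
into the byte rather than storing 1 outright; as long as the byte only ever
holds 0 or 1, the observable result is identical, and it matches what
_InterlockedOr8 already does on the MSVC side, as the commit message notes.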