author     Maxim Kuvyrkov <maxim@codesourcery.com>   2012-08-15 16:44:30 -0700
committer  Maxim Kuvyrkov <maxim@codesourcery.com>   2012-08-15 16:44:30 -0700
commit     309becf120640bb0fdd1a16e65996c23d0542590 (patch)
tree       6adbada4d391a306dd2e4558626f759a7cadbed7
parent     ef4009734b84903615be28b38638c166e5455692 (diff)
download   glibc-309becf120640bb0fdd1a16e65996c23d0542590.tar.gz
Optimize __libc_lock_lock and __libc_lock_trylock for MIPS.
-rw-r--r--  nptl/ChangeLog                                           |  6
-rw-r--r--  nptl/sysdeps/pthread/bits/libc-lockP.h                   | 10
-rw-r--r--  ports/ChangeLog.mips                                     |  6
-rw-r--r--  ports/sysdeps/unix/sysv/linux/mips/nptl/lowlevellock.h   | 39
4 files changed, 57 insertions, 4 deletions
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 0f31b4dbe4..545b2c2152 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,9 @@
+2012-08-15 Tom de Vries <vries@codesourcery.com>
+ Maxim Kuvyrkov <maxim@codesourcery.com>
+
+ * sysdeps/pthread/bits/libc-lockP.h (__libc_lock_lock)
+ (__libc_lock_trylock): Allow pre-existing definitions.
+
2012-08-15 Maxim Kuvyrkov <maxim@codesourcery.com>

* pthread_spin_lock.c: New file.
diff --git a/nptl/sysdeps/pthread/bits/libc-lockP.h b/nptl/sysdeps/pthread/bits/libc-lockP.h
index 0ebac917d7..7adaeb4398 100644
--- a/nptl/sysdeps/pthread/bits/libc-lockP.h
+++ b/nptl/sysdeps/pthread/bits/libc-lockP.h
@@ -176,9 +176,12 @@ typedef pthread_key_t __libc_key_t;
/* Lock the named lock variable. */
#if !defined NOT_IN_libc || defined IS_IN_libpthread
-# define __libc_lock_lock(NAME) \
+# ifndef __libc_lock_lock
+# define __libc_lock_lock(NAME) \
({ lll_lock (NAME, LLL_PRIVATE); 0; })
+# endif
#else
+# undef __libc_lock_lock
# define __libc_lock_lock(NAME) \
__libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
@@ -189,9 +192,12 @@ typedef pthread_key_t __libc_key_t;
/* Try to lock the named lock variable. */
#if !defined NOT_IN_libc || defined IS_IN_libpthread
-# define __libc_lock_trylock(NAME) \
+# ifndef __libc_lock_trylock
+# define __libc_lock_trylock(NAME) \
lll_trylock (NAME)
+# endif
#else
+# undef __libc_lock_trylock
# define __libc_lock_trylock(NAME) \
__libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
diff --git a/ports/ChangeLog.mips b/ports/ChangeLog.mips
index 518777223c..f8c18b3275 100644
--- a/ports/ChangeLog.mips
+++ b/ports/ChangeLog.mips
@@ -1,3 +1,9 @@
+2012-08-15 Tom de Vries <vries@codesourcery.com>
+ Maxim Kuvyrkov <maxim@codesourcery.com>
+
+ * sysdeps/unix/sysv/linux/mips/nptl/lowlevellock.h (__libc_lock_lock)
+ (__libc_lock_trylock): Define versions optimized for MIPS.
+
2012-08-15 Maxim Kuvyrkov <maxim@codesourcery.com>

* sysdeps/mips/nptl/pthread_spin_lock.S: Remove, use generic version.
diff --git a/ports/sysdeps/unix/sysv/linux/mips/nptl/lowlevellock.h b/ports/sysdeps/unix/sysv/linux/mips/nptl/lowlevellock.h
index 88b601eadd..d368ae1b66 100644
--- a/ports/sysdeps/unix/sysv/linux/mips/nptl/lowlevellock.h
+++ b/ports/sysdeps/unix/sysv/linux/mips/nptl/lowlevellock.h
@@ -1,5 +1,4 @@
-/* Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008,
- 2009 Free Software Foundation, Inc.
+/* Copyright (C) 2003-2012 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@@ -291,4 +290,40 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
__res; \
})

+/* Implement __libc_lock_lock using exchange_and_add, which expands into
+ a single instruction on XLP processors. We enable this for all MIPS
+ processors as atomic_exchange_and_add_acq and
+ atomic_compare_and_exchange_acq take the same time to execute.
+ This is a simplified expansion of ({ lll_lock (NAME, LLL_PRIVATE); 0; }).
+
+ Note: __lll_lock_wait_private() resets lock value to '2', which prevents
+ unbounded increase of the lock value and [with billions of threads]
+ overflow. */
+#define __libc_lock_lock(NAME) \
+ ({ \
+ int *__futex = &(NAME); \
+ if (__builtin_expect (atomic_exchange_and_add_acq (__futex, 1), 0)) \
+ __lll_lock_wait_private (__futex); \
+ 0; \
+ })
+
+#ifdef _MIPS_ARCH_XLP
+/* The generic version using a single atomic_compare_and_exchange_acq takes
+ less time for non-XLP processors, so we use below for XLP only. */
+# define __libc_lock_trylock(NAME) \
+ ({ \
+ int *__futex = &(NAME); \
+ int __result = atomic_exchange_and_add_acq (__futex, 1); \
+ /* If __result == 0, we succeeded in acquiring the lock. \
+ If __result == 1, we switched the lock to 'contended' state, which \
+ will cause a [possibly unnecessary] call to lll_futex_wait. This is \
+ unlikely, so we accept the possible inefficiency. \
+ If __result >= 2, we need to set the lock to 'contended' state to avoid \
+ unbounded increase from subsequent trylocks. */ \
+ if (__result >= 2) \
+ __result = atomic_exchange_acq (__futex, 2); \
+ __result; \
+ })
+#endif
+
#endif /* lowlevellock.h */
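
Editorial note (not part of the commit): the macros above rely on the low-level lock's three-state protocol, where 0 means unlocked, 1 means locked with no waiters, and 2 means locked with possible waiters. The sketch below is a hedged, self-contained model of that protocol using GCC's __atomic builtins and busy-waiting in place of glibc's atomic_* macros and lll_futex_wait; the demo_* names are hypothetical and only illustrate why a single fetch-and-add suffices on the fast path and why trylock clamps the value back to 2.

/* Illustrative sketch only -- not glibc code.  Models the 0/1/2 lock-state
   protocol behind __libc_lock_lock/__libc_lock_trylock; all names are
   hypothetical and the slow path spins instead of calling futex().  */
#include <stdio.h>

static int demo_futex = 0;   /* 0 = unlocked, 1 = locked, 2 = contended.  */

/* Fast path mirrors the patch: one atomic fetch-and-add; an old value of 0
   means we now own the lock.  */
static void
demo_lock (int *futex)
{
  if (__atomic_fetch_add (futex, 1, __ATOMIC_ACQUIRE) != 0)
    {
      /* Stand-in for __lll_lock_wait_private: pin the value at 2 so
         repeated waiters cannot grow the counter without bound, and keep
         retrying until the old value we observe is 0, i.e. the owner has
         released the lock.  A real implementation sleeps in
         futex(FUTEX_WAIT) instead of spinning.  */
      while (__atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE) != 0)
        ;
    }
}

/* Mirrors the XLP trylock: fetch-and-add, then clamp back to 2 if earlier
   trylocks had already inflated the value.  Returns 0 on success.  */
static int
demo_trylock (int *futex)
{
  int result = __atomic_fetch_add (futex, 1, __ATOMIC_ACQUIRE);
  if (result >= 2)
    result = __atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE);
  return result;
}

static void
demo_unlock (int *futex)
{
  /* A real unlock would wake a waiter when the old value was 2.  */
  __atomic_store_n (futex, 0, __ATOMIC_RELEASE);
}

int
main (void)
{
  demo_lock (&demo_futex);
  printf ("trylock while held: %d (non-zero expected)\n",
          demo_trylock (&demo_futex));
  demo_unlock (&demo_futex);
  printf ("trylock when free:  %d (0 expected)\n",
          demo_trylock (&demo_futex));
  demo_unlock (&demo_futex);
  return 0;
}

The design point the commit's comments make is that the fetch-and-add expands to a single atomic instruction on XLP, so the common uncontended case avoids a compare-and-swap retry loop, while the >= 2 clamp in trylock keeps the counter bounded even when many trylocks race against a held lock.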