author     Michael Cahill <michael.cahill@mongodb.com>    2016-04-20 17:02:50 +1000
committer  Michael Cahill <michael.cahill@mongodb.com>    2016-04-20 17:04:16 +1000
commit     88b898e7cb4ede9d1d525ae7d4edd9ab8e319f8d (patch)
tree       e9d81c95f0dfef2e1b12e1b2dc968ac74af1562d
parent     7ea2631de25c4246c83af146bf39fe83cbcb2055 (diff)
download   mongo-88b898e7cb4ede9d1d525ae7d4edd9ab8e319f8d.tar.gz
Merge pull request #2670 from wiredtiger/wt-2566 (tag: mongodb-3.2.6)
WT-2566 Lock/unlock operations should imply memory barriers.

(cherry picked from commit 05cfbc26c2ab2099d7c98080a79ae67ea531c24f)
-rw-r--r--  src/include/mutex.i   | 12
-rw-r--r--  src/support/mtx_rw.c  | 37
2 files changed, 38 insertions(+), 11 deletions(-)
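The change adds acquire-style barriers on the lock paths and release-style barriers on the unlock paths below. As a rough, portable sketch of what such barrier macros can map to (illustrative assumptions using C11 fences, not WiredTiger's actual WT_READ_BARRIER/WT_WRITE_BARRIER/WT_BARRIER definitions):

#include <stdatomic.h>

/* Acquire-style barrier: later reads and writes are not reordered before prior reads. */
#define DEMO_READ_BARRIER()   atomic_thread_fence(memory_order_acquire)

/* Release-style barrier: earlier writes are visible before any later stores. */
#define DEMO_WRITE_BARRIER()  atomic_thread_fence(memory_order_release)

/* Full barrier: no loads or stores may cross it in either direction. */
#define DEMO_BARRIER()        atomic_thread_fence(memory_order_seq_cst)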
diff --git a/src/include/mutex.i b/src/include/mutex.i
index 52250f84ab3..65956c13c08 100644
--- a/src/include/mutex.i
+++ b/src/include/mutex.i
@@ -306,6 +306,12 @@ __wt_fair_lock(WT_SESSION_IMPL *session, WT_FAIR_LOCK *lock)
__wt_sleep(0, 10);
}
+ /*
+ * Applications depend on a barrier here so that operations holding the
+ * lock see consistent data.
+ */
+ WT_READ_BARRIER();
+
return (0);
}
@@ -319,6 +325,12 @@ __wt_fair_unlock(WT_SESSION_IMPL *session, WT_FAIR_LOCK *lock)
WT_UNUSED(session);
/*
+ * Ensure that all updates made while the lock was held are visible to
+ * the next thread to acquire the lock.
+ */
+ WT_WRITE_BARRIER();
+
+ /*
* We have exclusive access - the update does not need to be atomic.
*/
++lock->fair_lock_owner;
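For context on the mutex.i half of the change: this fair lock is a ticket lock, and the new barriers give its acquire path acquire semantics and its release path release semantics. A minimal standalone sketch under those assumptions (the type and function names are illustrative, not WiredTiger's, and C11 fences stand in for the barrier macros):

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
    atomic_uint_fast16_t owner;     /* ticket currently being served */
    atomic_uint_fast16_t waiter;    /* next ticket to hand out */
} demo_fair_lock;

static void
demo_fair_lock_acquire(demo_fair_lock *lock)
{
    uint_fast16_t ticket;

    /* Take a ticket, then spin until it is our turn. */
    ticket = atomic_fetch_add_explicit(
        &lock->waiter, 1, memory_order_relaxed);
    while (atomic_load_explicit(&lock->owner, memory_order_relaxed) != ticket)
        ;

    /*
     * Acquire barrier: reads inside the critical section must not be
     * reordered before the lock is observed to be ours.
     */
    atomic_thread_fence(memory_order_acquire);
}

static void
demo_fair_lock_release(demo_fair_lock *lock)
{
    /*
     * Release barrier: writes made while the lock was held must be
     * visible before the next ticket holder can proceed.
     */
    atomic_thread_fence(memory_order_release);

    /* Only the lock holder updates the owner, so a plain increment is safe. */
    atomic_store_explicit(&lock->owner,
        atomic_load_explicit(&lock->owner, memory_order_relaxed) + 1,
        memory_order_relaxed);
}

With the fences in place, any store made while the lock is held is guaranteed to be visible to the next thread whose ticket comes up.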
diff --git a/src/support/mtx_rw.c b/src/support/mtx_rw.c
index b6876cdfbdc..dbf73bb4f13 100644
--- a/src/support/mtx_rw.c
+++ b/src/support/mtx_rw.c
@@ -183,6 +183,8 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
session, WT_VERB_MUTEX, "rwlock: readlock %s", rwlock->name));
WT_STAT_FAST_CONN_INCR(session, rwlock_read);
+ WT_DIAGNOSTIC_YIELD;
+
l = &rwlock->rwlock;
/*
@@ -213,6 +215,12 @@ __wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
*/
++l->s.readers;
+ /*
+ * Applications depend on a barrier here so that operations holding the
+ * lock see consistent data.
+ */
+ WT_READ_BARRIER();
+
return (0);
}
@@ -306,6 +314,12 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
__wt_sleep(0, 10);
}
+ /*
+ * Applications depend on a barrier here so that operations holding the
+ * lock see consistent data.
+ */
+ WT_READ_BARRIER();
+
return (0);
}
@@ -316,31 +330,32 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
int
__wt_writeunlock(WT_SESSION_IMPL *session, WT_RWLOCK *rwlock)
{
- wt_rwlock_t *l, copy;
+ wt_rwlock_t *l, new;
WT_RET(__wt_verbose(
session, WT_VERB_MUTEX, "rwlock: writeunlock %s", rwlock->name));
+ /*
+ * Ensure that all updates made while the lock was held are visible to
+ * the next thread to acquire the lock.
+ */
+ WT_WRITE_BARRIER();
+
l = &rwlock->rwlock;
- copy = *l;
+ new = *l;
/*
* We're the only writer of the writers/readers fields, so the update
* does not need to be atomic; we have to update both values at the
* same time though, otherwise we'd potentially race with the thread
* next granted the lock.
- *
- * Use a memory barrier to ensure the compiler doesn't mess with these
- * instructions and rework the code in a way that avoids the update as
- * a unit.
*/
- WT_BARRIER();
-
- ++copy.s.writers;
- ++copy.s.readers;
+ ++new.s.writers;
+ ++new.s.readers;
- l->i.wr = copy.i.wr;
+ l->i.wr = new.i.wr;
+ WT_DIAGNOSTIC_YIELD;
return (0);
}
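For the mtx_rw.c half: the writers and readers tickets still advance together, but the ordering now comes from the WT_WRITE_BARRIER above rather than the removed WT_BARRIER. Both counters are aliased by a single 32-bit field, so one store updates them as a unit and a waiter can never observe one bumped without the other. A sketch of that layout (the field arrangement here is an assumption for illustration, not the actual wt_rwlock_t definition):

#include <stdint.h>

typedef union {
    uint64_t u;                     /* whole lock as one 64-bit word */
    struct {
        uint32_t wr;                /* writers + readers viewed as a unit */
    } i;
    struct {
        uint16_t writers;           /* now-serving ticket for writers */
        uint16_t readers;           /* now-serving ticket for readers */
        uint16_t users;             /* next ticket to hand out */
        uint16_t pad;
    } s;
} demo_rwlock_t;

/* Hand the lock to the next waiter: bump both tickets with one store. */
static void
demo_writeunlock(demo_rwlock_t *l)
{
    demo_rwlock_t new;

    new = *l;
    ++new.s.writers;
    ++new.s.readers;
    l->i.wr = new.i.wr;             /* a single 32-bit store covers both fields */
}

The added WT_DIAGNOSTIC_YIELD calls appear intended to widen race windows in diagnostic builds so that ordering mistakes surface sooner in testing.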