Diffstat (limited to 'src/third_party/wiredtiger/src/support/mtx_rw.c')
-rw-r--r--  src/third_party/wiredtiger/src/support/mtx_rw.c | 48
1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/src/third_party/wiredtiger/src/support/mtx_rw.c b/src/third_party/wiredtiger/src/support/mtx_rw.c
index 2354ad4f4cc..eeb9c6b72a2 100644
--- a/src/third_party/wiredtiger/src/support/mtx_rw.c
+++ b/src/third_party/wiredtiger/src/support/mtx_rw.c
@@ -91,6 +91,8 @@ int
__wt_rwlock_init(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
l->u.v = 0;
+ l->stat_read_count_off = l->stat_write_count_off = -1;
+ l->stat_app_usecs_off = l->stat_int_usecs_off = -1;
WT_RET(__wt_cond_alloc(session, "rwlock wait", &l->cond_readers));
WT_RET(__wt_cond_alloc(session, "rwlock wait", &l->cond_writers));
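
Note on this hunk: the new offset fields default to -1, which the hot-path checks added later in the patch treat as "statistics not configured for this lock". A minimal standalone sketch of that sentinel pattern, with hypothetical names and a hypothetical setter (only the -1 default is taken from the diff; the real patch has its own way of assigning offsets):

#include <stdint.h>

/*
 * Simplified stand-in for the lock structure; the real WT_RWLOCK carries much
 * more state. The offsets locate this lock's counters inside the per-bucket
 * statistics arrays; -1 means "no statistics configured for this lock".
 */
struct sketch_rwlock {
	int16_t stat_read_count_off, stat_write_count_off;
	int16_t stat_app_usecs_off, stat_int_usecs_off;
};

static void
sketch_rwlock_init(struct sketch_rwlock *l)
{
	/* Statistics default to off; a caller opts a lock in later. */
	l->stat_read_count_off = l->stat_write_count_off = -1;
	l->stat_app_usecs_off = l->stat_int_usecs_off = -1;
}

static void
sketch_rwlock_track(struct sketch_rwlock *l,
    int16_t read_off, int16_t write_off, int16_t app_off, int16_t int_off)
{
	/* Hypothetical setter: supplying real offsets enables tracking. */
	l->stat_read_count_off = read_off;
	l->stat_write_count_off = write_off;
	l->stat_app_usecs_off = app_off;
	l->stat_int_usecs_off = int_off;
}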
@@ -118,8 +120,13 @@ int
__wt_try_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
WT_RWLOCK new, old;
+ int64_t **stats;
WT_STAT_CONN_INCR(session, rwlock_read);
+ if (l->stat_read_count_off != -1 && WT_STAT_ENABLED(session)) {
+ stats = (int64_t **)S2C(session)->stats;
+ stats[session->stat_bucket][l->stat_read_count_off]++;
+ }
old.u.v = l->u.v;
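
The added lines in __wt_try_readlock show the counting pattern used throughout the patch: connection statistics live in per-bucket arrays indexed by session->stat_bucket (so concurrent sessions tend to touch different cache lines), and the lock stores the column offset of its own counter. A hedged standalone sketch of that update; sketch_conn, SKETCH_STAT_BUCKETS and stat_enabled are illustrative assumptions, not the WiredTiger API:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_STAT_BUCKETS 16          /* illustrative bucket count */
#define SKETCH_STAT_SLOTS   64          /* illustrative number of counters */

struct sketch_conn {
	/* One counter array per bucket; each session hashes to a bucket. */
	int64_t stats[SKETCH_STAT_BUCKETS][SKETCH_STAT_SLOTS];
	bool stat_enabled;              /* stand-in for WT_STAT_ENABLED() */
};

struct sketch_session {
	struct sketch_conn *conn;
	unsigned stat_bucket;           /* this session's bucket index */
};

/*
 * Mirror of the fast-path update: skip all work unless the lock has a
 * configured offset and statistics are globally enabled.
 */
static void
sketch_stat_incr(struct sketch_session *session, int off)
{
	if (off != -1 && session->conn->stat_enabled)
		session->conn->stats[session->stat_bucket][off]++;
}

Checking the lock's own offset first means locks that never opt in pay only a single compare on the fast path.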
@@ -159,11 +166,18 @@ void
__wt_readlock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
WT_RWLOCK new, old;
+ struct timespec enter, leave;
+ int64_t **stats;
int pause_cnt;
int16_t writers_active;
uint8_t ticket;
+ bool set_stats;
WT_STAT_CONN_INCR(session, rwlock_read);
+ stats = (int64_t **)S2C(session)->stats;
+ set_stats = (l->stat_read_count_off != -1 && WT_STAT_ENABLED(session));
+ if (set_stats)
+ stats[session->stat_bucket][l->stat_read_count_off]++;
WT_DIAGNOSTIC_YIELD;
@@ -221,6 +235,8 @@ stall: __wt_cond_wait(session,
break;
}
+ if (set_stats)
+ __wt_epoch(session, &enter);
/* Wait for our group to start. */
for (pause_cnt = 0; ticket != l->u.s.current; pause_cnt++) {
if (pause_cnt < 1000)
@@ -234,6 +250,15 @@ stall: __wt_cond_wait(session,
l->cond_readers, 10 * WT_THOUSAND, __read_blocked);
}
}
+ if (set_stats) {
+ __wt_epoch(session, &leave);
+ if (F_ISSET(session, WT_SESSION_INTERNAL))
+ stats[session->stat_bucket][l->stat_int_usecs_off] +=
+ (int64_t)WT_TIMEDIFF_US(leave, enter);
+ else
+ stats[session->stat_bucket][l->stat_app_usecs_off] +=
+ (int64_t)WT_TIMEDIFF_US(leave, enter);
+ }
/*
* Applications depend on a barrier here so that operations holding the
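
The slow-path additions bracket the ticket-wait loop with two timestamps and charge the elapsed microseconds to either an internal-thread or an application-thread counter, split on the WT_SESSION_INTERNAL flag. A standalone sketch of that accounting using clock_gettime timestamps; the helper names are illustrative, and sketch_timediff_us stands in for WiredTiger's WT_TIMEDIFF_US macro:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Stand-in for WT_TIMEDIFF_US: microseconds from 'begin' to 'end'. */
static int64_t
sketch_timediff_us(const struct timespec *end, const struct timespec *begin)
{
	return ((int64_t)(end->tv_sec - begin->tv_sec) * 1000000 +
	    (end->tv_nsec - begin->tv_nsec) / 1000);
}

/*
 * Charge a blocked interval to the internal- or application-thread counter,
 * mirroring the F_ISSET(session, WT_SESSION_INTERNAL) split in the patch.
 */
static void
sketch_charge_wait(bool internal_session,
    int64_t *int_usecs, int64_t *app_usecs,
    const struct timespec *enter, const struct timespec *leave)
{
	int64_t waited = sketch_timediff_us(leave, enter);

	if (internal_session)
		*int_usecs += waited;
	else
		*app_usecs += waited;
}

/*
 * Usage: take 'enter' before the wait loop and 'leave' after it, e.g. with
 * clock_gettime(CLOCK_MONOTONIC, &enter) where the patch calls __wt_epoch().
 */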
@@ -282,8 +307,13 @@ int
__wt_try_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
WT_RWLOCK new, old;
+ int64_t **stats;
WT_STAT_CONN_INCR(session, rwlock_write);
+ if (l->stat_write_count_off != -1 && WT_STAT_ENABLED(session)) {
+ stats = (int64_t **)S2C(session)->stats;
+ stats[session->stat_bucket][l->stat_write_count_off]++;
+ }
/*
* This write lock can only be granted if no readers or writers blocked
@@ -333,10 +363,17 @@ void
__wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
{
WT_RWLOCK new, old;
+ struct timespec enter, leave;
+ int64_t **stats;
int pause_cnt;
uint8_t ticket;
+ bool set_stats;
WT_STAT_CONN_INCR(session, rwlock_write);
+ stats = (int64_t **)S2C(session)->stats;
+ set_stats = (l->stat_write_count_off != -1 && WT_STAT_ENABLED(session));
+ if (set_stats)
+ stats[session->stat_bucket][l->stat_write_count_off]++;
for (;;) {
old.u.v = l->u.v;
@@ -367,6 +404,8 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
* could see no readers active from a different batch and decide that
* we have the lock.
*/
+ if (set_stats)
+ __wt_epoch(session, &enter);
for (pause_cnt = 0, old.u.v = l->u.v;
ticket != old.u.s.current || old.u.s.readers_active != 0;
pause_cnt++, old.u.v = l->u.v) {
@@ -381,6 +420,15 @@ __wt_writelock(WT_SESSION_IMPL *session, WT_RWLOCK *l)
l->cond_writers, 10 * WT_THOUSAND, __write_blocked);
}
}
+ if (set_stats) {
+ __wt_epoch(session, &leave);
+ if (F_ISSET(session, WT_SESSION_INTERNAL))
+ stats[session->stat_bucket][l->stat_int_usecs_off] +=
+ (int64_t)WT_TIMEDIFF_US(leave, enter);
+ else
+ stats[session->stat_bucket][l->stat_app_usecs_off] +=
+ (int64_t)WT_TIMEDIFF_US(leave, enter);
+ }
/*
* Applications depend on a barrier here so that operations holding the