summary refs log tree commit diff
path: root/storage/perfschema
diff options
context:
space:
mode:
author	Marc Alff <marc.alff@oracle.com>	2010-08-12 03:51:58 -0600
committer	Marc Alff <marc.alff@oracle.com>	2010-08-12 03:51:58 -0600
commit	a1d90f124f30f30d3bc609d7a5783341089e54ce (patch)
tree	0ae56b9fa9e13ecd4afc264e3b79187e301ad860 /storage/perfschema
parent	69091c4949421b8f5a58ae6c72b722f392dde51d (diff)
download	mariadb-git-a1d90f124f30f30d3bc609d7a5783341089e54ce.tar.gz
Bug#55462 Performance schema: reduce the overhead of PFS_events_waits::m_wait_class
This is a performance improvement fix. Removed the "volatile" property of PFS_events_waits::m_wait_class. Simplified the code accordingly.
Diffstat (limited to 'storage/perfschema')
-rw-r--r--	storage/perfschema/pfs.cc	| 7
-rw-r--r--	storage/perfschema/pfs_events_waits.cc	| 25
-rw-r--r--	storage/perfschema/pfs_events_waits.h	| 2
-rw-r--r--	storage/perfschema/table_events_waits.cc	| 12
4 files changed, 7 insertions, 39 deletions
diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc
index 93c9cf14e9d..60ef2a3b194 100644
--- a/storage/perfschema/pfs.cc
+++ b/storage/perfschema/pfs.cc
@@ -1117,7 +1117,6 @@ get_thread_mutex_locker_v1(PSI_mutex_locker_state *state,
}
PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
[pfs_thread->m_wait_locker_count];
- pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
pfs_locker->m_target.m_mutex= pfs_mutex;
pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1163,7 +1162,6 @@ get_thread_rwlock_locker_v1(PSI_rwlock_locker_state *state,
}
PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
[pfs_thread->m_wait_locker_count];
- pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
pfs_locker->m_target.m_rwlock= pfs_rwlock;
pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1222,7 +1220,6 @@ get_thread_cond_locker_v1(PSI_cond_locker_state *state,
}
PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
[pfs_thread->m_wait_locker_count];
- pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
pfs_locker->m_target.m_cond= pfs_cond;
pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1267,7 +1264,6 @@ get_thread_table_locker_v1(PSI_table_locker_state *state,
}
PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
[pfs_thread->m_wait_locker_count];
- pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
pfs_locker->m_target.m_table= pfs_table;
pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1320,7 +1316,6 @@ get_thread_file_name_locker_v1(PSI_file_locker_state *state,
PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
[pfs_thread->m_wait_locker_count];
- pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
pfs_locker->m_target.m_file= pfs_file;
pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1372,7 +1367,6 @@ get_thread_file_stream_locker_v1(PSI_file_locker_state *state,
}
PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
[pfs_thread->m_wait_locker_count];
- pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
pfs_locker->m_target.m_file= pfs_file;
pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1441,7 +1435,6 @@ get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state,
}
PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
[pfs_thread->m_wait_locker_count];
- pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
pfs_locker->m_target.m_file= pfs_file;
pfs_locker->m_waits_current.m_thread= pfs_thread;
diff --git a/storage/perfschema/pfs_events_waits.cc b/storage/perfschema/pfs_events_waits.cc
index e32a77512cc..b6cadf9e61c 100644
--- a/storage/perfschema/pfs_events_waits.cc
+++ b/storage/perfschema/pfs_events_waits.cc
@@ -80,25 +80,10 @@ void cleanup_events_waits_history_long(void)
events_waits_history_long_array= NULL;
}
-static void copy_events_waits(PFS_events_waits *dest,
- const PFS_events_waits *source)
+static inline void copy_events_waits(PFS_events_waits *dest,
+ const PFS_events_waits *source)
{
- /* m_wait_class must be the first member of PFS_events_waits. */
- compile_time_assert(offsetof(PFS_events_waits, m_wait_class) == 0);
-
- char* dest_body= (reinterpret_cast<char*> (dest)) + sizeof(events_waits_class);
- const char* source_body= (reinterpret_cast<const char*> (source))
- + sizeof(events_waits_class);
-
- /* See comments in table_events_waits_common::make_row(). */
-
- /* Signal readers they are about to read garbage ... */
- dest->m_wait_class= NO_WAIT_CLASS;
- /* ... that this can generate. */
- memcpy(dest_body, source_body,
- sizeof(PFS_events_waits) - sizeof(events_waits_class));
- /* Signal readers the record is now clean again. */
- dest->m_wait_class= source->m_wait_class;
+ memcpy(dest, source, sizeof(PFS_events_waits));
}
/**
@@ -116,9 +101,7 @@ void insert_events_waits_history(PFS_thread *thread, PFS_events_waits *wait)
causing a potential race condition.
We are not testing for this and insert a possibly empty record,
to make this thread (the writer) faster.
- This is ok, the truncated data will have
- wait->m_wait_class == NO_WAIT_CLASS,
- which readers of m_waits_history will filter out.
+ This is ok, the readers of m_waits_history will filter this out.
*/
copy_events_waits(&thread->m_waits_history[index], wait);
diff --git a/storage/perfschema/pfs_events_waits.h b/storage/perfschema/pfs_events_waits.h
index 9a5ed8644f3..d277db39d8d 100644
--- a/storage/perfschema/pfs_events_waits.h
+++ b/storage/perfschema/pfs_events_waits.h
@@ -97,7 +97,7 @@ struct PFS_events_waits
- TRUNCATE EVENTS_WAITS_HISTORY
- TRUNCATE EVENTS_WAITS_HISTORY_LONG
*/
- volatile events_waits_class m_wait_class;
+ events_waits_class m_wait_class;
/** Executing thread. */
PFS_thread *m_thread;
/** Instrument metadata. */
diff --git a/storage/perfschema/table_events_waits.cc b/storage/perfschema/table_events_waits.cc
index a09d7f1ba30..5e5972b68f1 100644
--- a/storage/perfschema/table_events_waits.cc
+++ b/storage/perfschema/table_events_waits.cc
@@ -217,16 +217,8 @@ void table_events_waits_common::make_row(bool thread_own_wait,
or 8 atomics per recorded event.
The problem is that we record a *lot* of events ...
- Instead, a *dirty* marking is done using m_wait_class.
- Using m_wait_class alone does not guarantee anything, it just filters
- out most of the bad data.
- This is acceptable because this code is garbage-proof,
- and won't crash on bad data, only display it,
- very rarely (which is accepted).
-
- If a bad record is displayed, it's a very transient failure:
- the next select * from EVENTS_WAITS_CURRENT/_HISTORY/_HISTORY_LONG will
- show clean data again.
+ This code is prepared to accept *dirty* records,
+ and sanitizes all the data before returning a row.
*/
m_row.m_thread_internal_id= safe_thread->m_thread_internal_id;