Diffstat (limited to 'storage')
-rw-r--r--   storage/innobase/CMakeLists.txt             2
-rw-r--r--   storage/perfschema/pfs.cc                   7
-rw-r--r--   storage/perfschema/pfs_events_waits.cc     25
-rw-r--r--   storage/perfschema/pfs_events_waits.h       2
-rw-r--r--   storage/perfschema/table_events_waits.cc   12
5 files changed, 8 insertions, 40 deletions
diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt
index 218dbad2df0..2575580d646 100644
--- a/storage/innobase/CMakeLists.txt
+++ b/storage/innobase/CMakeLists.txt
@@ -43,7 +43,7 @@ ENDIF()
 
 # Enable InnoDB's UNIV_DEBUG if MySQL's WITH_DEBUG[_FULL] is defined
 # enable when this bug is resolved:
 # Bug#54861 Additional connections not handled properly in mtr --embedded
-#IF(WITH_DEBUG OR WITH_DEBUG_FULL)
+#IF(WITH_DEBUG)
 # ADD_DEFINITIONS("-DUNIV_DEBUG")
 #ENDIF()
diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc
index 93c9cf14e9d..60ef2a3b194 100644
--- a/storage/perfschema/pfs.cc
+++ b/storage/perfschema/pfs.cc
@@ -1117,7 +1117,6 @@ get_thread_mutex_locker_v1(PSI_mutex_locker_state *state,
   }
   PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
     [pfs_thread->m_wait_locker_count];
-  pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
 
   pfs_locker->m_target.m_mutex= pfs_mutex;
   pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1163,7 +1162,6 @@ get_thread_rwlock_locker_v1(PSI_rwlock_locker_state *state,
   }
   PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
     [pfs_thread->m_wait_locker_count];
-  pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
 
   pfs_locker->m_target.m_rwlock= pfs_rwlock;
   pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1222,7 +1220,6 @@ get_thread_cond_locker_v1(PSI_cond_locker_state *state,
   }
   PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
     [pfs_thread->m_wait_locker_count];
-  pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
 
   pfs_locker->m_target.m_cond= pfs_cond;
   pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1267,7 +1264,6 @@ get_thread_table_locker_v1(PSI_table_locker_state *state,
   }
   PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
     [pfs_thread->m_wait_locker_count];
-  pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
 
   pfs_locker->m_target.m_table= pfs_table;
   pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1320,7 +1316,6 @@ get_thread_file_name_locker_v1(PSI_file_locker_state *state,
 
   PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
     [pfs_thread->m_wait_locker_count];
-  pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
 
   pfs_locker->m_target.m_file= pfs_file;
   pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1372,7 +1367,6 @@ get_thread_file_stream_locker_v1(PSI_file_locker_state *state,
   }
   PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
     [pfs_thread->m_wait_locker_count];
-  pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
 
   pfs_locker->m_target.m_file= pfs_file;
   pfs_locker->m_waits_current.m_thread= pfs_thread;
@@ -1441,7 +1435,6 @@ get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state,
   }
   PFS_wait_locker *pfs_locker= &pfs_thread->m_wait_locker_stack
     [pfs_thread->m_wait_locker_count];
-  pfs_locker->m_waits_current.m_wait_class= NO_WAIT_CLASS;
 
   pfs_locker->m_target.m_file= pfs_file;
   pfs_locker->m_waits_current.m_thread= pfs_thread;
diff --git a/storage/perfschema/pfs_events_waits.cc b/storage/perfschema/pfs_events_waits.cc
index e32a77512cc..b6cadf9e61c 100644
--- a/storage/perfschema/pfs_events_waits.cc
+++ b/storage/perfschema/pfs_events_waits.cc
@@ -80,25 +80,10 @@ void cleanup_events_waits_history_long(void)
   events_waits_history_long_array= NULL;
 }
 
-static void copy_events_waits(PFS_events_waits *dest,
-                              const PFS_events_waits *source)
+static inline void copy_events_waits(PFS_events_waits *dest,
+                                     const PFS_events_waits *source)
 {
-  /* m_wait_class must be the first member of PFS_events_waits. */
-  compile_time_assert(offsetof(PFS_events_waits, m_wait_class) == 0);
-
-  char* dest_body= (reinterpret_cast<char*> (dest)) + sizeof(events_waits_class);
-  const char* source_body= (reinterpret_cast<const char*> (source))
-                           + sizeof(events_waits_class);
-
-  /* See comments in table_events_waits_common::make_row(). */
-
-  /* Signal readers they are about to read garbage ... */
-  dest->m_wait_class= NO_WAIT_CLASS;
-  /* ... that this can generate. */
-  memcpy(dest_body, source_body,
-         sizeof(PFS_events_waits) - sizeof(events_waits_class));
-  /* Signal readers the record is now clean again. */
-  dest->m_wait_class= source->m_wait_class;
+  memcpy(dest, source, sizeof(PFS_events_waits));
 }
 
 /**
@@ -116,9 +101,7 @@ void insert_events_waits_history(PFS_thread *thread, PFS_events_waits *wait)
     causing a potential race condition.
     We are not testing for this and insert a possibly empty record,
     to make this thread (the writer) faster.
-    This is ok, the truncated data will have
-    wait->m_wait_class == NO_WAIT_CLASS,
-    which readers of m_waits_history will filter out.
+    This is ok, the readers of m_waits_history will filter this out.
   */
   copy_events_waits(&thread->m_waits_history[index], wait);
 
diff --git a/storage/perfschema/pfs_events_waits.h b/storage/perfschema/pfs_events_waits.h
index 9a5ed8644f3..d277db39d8d 100644
--- a/storage/perfschema/pfs_events_waits.h
+++ b/storage/perfschema/pfs_events_waits.h
@@ -97,7 +97,7 @@ struct PFS_events_waits
     - TRUNCATE EVENTS_WAITS_HISTORY
     - TRUNCATE EVENTS_WAITS_HISTORY_LONG
   */
-  volatile events_waits_class m_wait_class;
+  events_waits_class m_wait_class;
   /** Executing thread. */
   PFS_thread *m_thread;
   /** Instrument metadata. */
diff --git a/storage/perfschema/table_events_waits.cc b/storage/perfschema/table_events_waits.cc
index a09d7f1ba30..5e5972b68f1 100644
--- a/storage/perfschema/table_events_waits.cc
+++ b/storage/perfschema/table_events_waits.cc
@@ -217,16 +217,8 @@ void table_events_waits_common::make_row(bool thread_own_wait,
     or 8 atomics per recorded event.
     The problem is that we record a *lot* of events ...
 
-    Instead, a *dirty* marking is done using m_wait_class.
-    Using m_wait_class alone does not guarantee anything, it just filters
-    out most of the bad data.
-    This is acceptable because this code is garbage-proof,
-    and won't crash on bad data, only display it,
-    very rarely (which is accepted).
-
-    If a bad record is displayed, it's a very transient failure:
-    the next select * from EVENTS_WAITS_CURRENT/_HISTORY/_HISTORY_LONG will
-    show clean data again.
+    This code is prepared to accept *dirty* records,
+    and sanitizes all the data before returning a row.
   */
 
   m_row.m_thread_internal_id= safe_thread->m_thread_internal_id;
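
Note on the perfschema hunks: the patch drops the NO_WAIT_CLASS dirty-marking protocol (and the volatile qualifier on m_wait_class), turning copy_events_waits into a plain memcpy and shifting responsibility to the readers, which filter and sanitize possibly dirty history records. The following is a minimal, self-contained sketch of that writer/reader split, not the actual Performance Schema code; EventsWaits, WaitClass, ThreadHistory, HISTORY_SIZE and read_history_row are simplified, assumed names used only for illustration.

// Sketch only: illustrates an unsynchronized writer into a per-thread
// history ring plus a reader that filters/sanitizes dirty records.
#include <cstring>

enum WaitClass { NO_WAIT_CLASS= 0, WAIT_CLASS_MUTEX, WAIT_CLASS_RWLOCK };

struct EventsWaits
{
  WaitClass m_wait_class;
  unsigned long long m_timer_start;
  unsigned long long m_timer_end;
};

static const unsigned int HISTORY_SIZE= 10;

struct ThreadHistory
{
  EventsWaits m_waits_history[HISTORY_SIZE];
  unsigned int m_waits_history_index;
};

/* Writer side: a plain memcpy, no locks and no per-record dirty marking.
   A concurrent reader (or TRUNCATE) may observe a partially written
   record; that is tolerated by design to keep the writer fast. */
static inline void copy_events_waits(EventsWaits *dest,
                                     const EventsWaits *source)
{
  memcpy(dest, source, sizeof(EventsWaits));
}

void insert_events_waits_history(ThreadHistory *thread,
                                 const EventsWaits *wait)
{
  unsigned int index= thread->m_waits_history_index;
  copy_events_waits(&thread->m_waits_history[index], wait);
  index++;
  if (index >= HISTORY_SIZE)
    index= 0;
  thread->m_waits_history_index= index;
}

/* Reader side: copy the record out, then filter and sanitize it before
   exposing it as a table row, instead of trusting a volatile dirty flag. */
bool read_history_row(const ThreadHistory *thread, unsigned int index,
                      EventsWaits *row)
{
  if (index >= HISTORY_SIZE)
    return false;
  *row= thread->m_waits_history[index];
  if (row->m_wait_class == NO_WAIT_CLASS)
    return false;                            /* empty or garbage record: skip */
  if (row->m_timer_end < row->m_timer_start)
    row->m_timer_end= row->m_timer_start;    /* sanitize torn timer values */
  return true;
}

The sketch mirrors the trade-off stated in the removed comments: a torn read can at worst produce a transient garbage row, which the reader discards or sanitizes, while the recording path avoids any per-event synchronization.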