diff options
author | Sergey Vojtovich <svoj@mariadb.org> | 2020-03-21 18:36:31 +0400 |
---|---|---|
committer | Sergey Vojtovich <svoj@mariadb.org> | 2020-03-21 18:37:19 +0400 |
commit | 62687801ffedc8521e3dbb111f26e7668902949c (patch) | |
tree | 364b713b1ac253d1a17ec164fc01af565ab69da7 /sql/table_cache.cc | |
parent | 3b3f931570266487d9a9a2536514fe37b5297b37 (diff) | |
download | mariadb-git-62687801ffedc8521e3dbb111f26e7668902949c.tar.gz |
tc_active_instances: my_atomic to std::atomic
Diffstat (limited to 'sql/table_cache.cc')
-rw-r--r-- | sql/table_cache.cc | 34 |
1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/sql/table_cache.cc b/sql/table_cache.cc index 0743525b944..15255c56083 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -56,7 +56,7 @@ ulong tdc_size; /**< Table definition cache threshold for LRU eviction. */ ulong tc_size; /**< Table cache threshold for LRU eviction. */ uint32 tc_instances; -uint32 tc_active_instances= 1; +static std::atomic<uint32_t> tc_active_instances(1); static std::atomic<bool> tc_contention_warning_reported; /** Data collections. */ @@ -163,7 +163,7 @@ struct Table_cache_instance overhead on TABLE object release. All other table cache mutex acquistions are considered out of hot path and are not instrumented either. */ - void lock_and_check_contention(uint32 n_instances, uint32 instance) + void lock_and_check_contention(uint32_t n_instances, uint32_t instance) { if (mysql_mutex_trylock(&LOCK_table_cache)) { @@ -172,11 +172,10 @@ struct Table_cache_instance { if (n_instances < tc_instances) { - if (my_atomic_cas32_weak_explicit((int32*) &tc_active_instances, - (int32*) &n_instances, - (int32) n_instances + 1, - MY_MEMORY_ORDER_RELAXED, - MY_MEMORY_ORDER_RELAXED)) + if (tc_active_instances. + compare_exchange_weak(n_instances, n_instances + 1, + std::memory_order_relaxed, + std::memory_order_relaxed)) { sql_print_information("Detected table cache mutex contention at instance %d: " "%d%% waits. 
Additional table cache instance " @@ -354,8 +353,8 @@ void tc_purge(bool mark_flushed) void tc_add_table(THD *thd, TABLE *table) { - uint32 i= thd->thread_id % my_atomic_load32_explicit((int32*) &tc_active_instances, - MY_MEMORY_ORDER_RELAXED); + uint32_t i= + thd->thread_id % tc_active_instances.load(std::memory_order_relaxed); TABLE *LRU_table= 0; TDC_element *element= table->s->tdc; @@ -408,10 +407,8 @@ void tc_add_table(THD *thd, TABLE *table) TABLE *tc_acquire_table(THD *thd, TDC_element *element) { - uint32 n_instances= - my_atomic_load32_explicit((int32*) &tc_active_instances, - MY_MEMORY_ORDER_RELAXED); - uint32 i= thd->thread_id % n_instances; + uint32_t n_instances= tc_active_instances.load(std::memory_order_relaxed); + uint32_t i= thd->thread_id % n_instances; TABLE *table; tc[i].lock_and_check_contention(n_instances, i); @@ -1342,3 +1339,14 @@ int tdc_iterate(THD *thd, my_hash_walk_action action, void *argument, } return res; } + + +int show_tc_active_instances(THD *thd, SHOW_VAR *var, char *buff, + enum enum_var_type scope) +{ + var->type= SHOW_UINT; + var->value= buff; + *(reinterpret_cast<uint32_t*>(buff))= + tc_active_instances.load(std::memory_order_relaxed); + return 0; +} |