author    Marko Mäkelä <marko.makela@mariadb.com>    2018-10-05 17:40:06 +0300
committer GitHub <noreply@github.com>                2018-10-05 17:40:06 +0300
commit    079d0a8724aa3faa3117554c44b81965df457ef3 (patch)
tree      a9d5eb7db1bed39764c7367441f00e46b74ddfbc
parent    1655053ac1c0b175d860defa2819b29642f664a7 (diff)
parent    15803fce92a9e3ec7daab79d21990b499b1a709e (diff)
Merge pull request #876 from tempesta-tech/tt-10.1-MDEV-17313-counter-race
MDEV-17313 Data race in ib_counter_t
-rw-r--r--  storage/innobase/include/ut0counter.h  46
-rw-r--r--  storage/xtradb/include/ut0counter.h    46
2 files changed, 54 insertions(+), 38 deletions(-)
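The change below replaces the plain read-modify-write updates of the per-slot
counters with relaxed atomic operations. As a minimal sketch of the same
pattern in portable C++11, using std::atomic in place of MariaDB's
my_atomic_*_explicit wrappers (the class name sharded_counter, the slot
constants, and the hash-based indexer here are illustrative assumptions, not
code from this tree):

    // Sketch only: the fuzzy-counter pattern of ib_counter_t, written with
    // C++11 std::atomic. Names and constants are illustrative, not taken
    // from the MariaDB tree.
    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <thread>

    static const std::size_t N_SLOTS    = 64;  // cf. IB_N_SLOTS
    static const std::size_t CACHE_LINE = 64;  // cf. CACHE_LINE_SIZE

    struct sharded_counter {
        // One counter per slot, each on its own cache line so that writers
        // hitting different slots do not false-share.
        struct alignas(CACHE_LINE) slot {
            std::atomic<std::uint64_t> value;
            slot() : value(0) {}
        };
        slot m_counter[N_SLOTS];

        // Pick a slot from the calling thread's id, like thread_id_indexer_t.
        static std::size_t offset() {
            return std::hash<std::thread::id>()(std::this_thread::get_id())
                   % N_SLOTS;
        }

        // Relaxed add: the increment itself is race-free, but it imposes no
        // ordering on surrounding memory operations; none is needed for a
        // pure statistic.
        void add(std::uint64_t n) {
            m_counter[offset()].value.fetch_add(n, std::memory_order_relaxed);
        }

        // Relaxed loads summed over all slots: "not 100% accurate but close
        // enough", since writers may be mid-update while we read.
        operator std::uint64_t() const {
            std::uint64_t total = 0;
            for (std::size_t i = 0; i < N_SLOTS; ++i)
                total += m_counter[i].value.load(std::memory_order_relaxed);
            return total;
        }
    };

Relaxed ordering suffices here because the counter never synchronizes other
data; the sharding plus cache-line alignment keeps the atomic adds cheap.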
diff --git a/storage/innobase/include/ut0counter.h b/storage/innobase/include/ut0counter.h
index edc0db3b03d..6fde8a7a638 100644
--- a/storage/innobase/include/ut0counter.h
+++ b/storage/innobase/include/ut0counter.h
@@ -32,6 +32,7 @@ Created 2012/04/12 by Sunny Bains
#include <string.h>
#include "os0thread.h"
#include "os0sync.h"
+#include "my_atomic.h"
/** Default number of slots to use in ib_counter_t */
#define IB_N_SLOTS 64
@@ -81,8 +82,8 @@ struct thread_id_indexer_t : public generic_indexer_t<Type, N> {
}
};
-/** Class for using fuzzy counters. The counter is not protected by any
-mutex and the results are not guaranteed to be 100% accurate but close
+/** Class for using fuzzy counters. The counter is relaxed atomic
+so the results are not guaranteed to be 100% accurate but close
enough. Creates an array of counters and separates each element by the
CACHE_LINE_SIZE bytes */
template <
@@ -91,20 +92,6 @@ template <
template<typename, int> class Indexer = thread_id_indexer_t>
struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
{
-#ifdef UNIV_DEBUG
- ~ib_counter_t()
- {
- size_t n = (CACHE_LINE_SIZE / sizeof(Type));
-
- /* Check that we aren't writing outside our defined bounds. */
- for (size_t i = 0; i < UT_ARR_SIZE(m_counter); i += n) {
- for (size_t j = 1; j < n - 1; ++j) {
- ut_ad(m_counter[i + j] == 0);
- }
- }
- }
-#endif /* UNIV_DEBUG */
-
/** Increment the counter by 1. */
void inc() UNIV_NOTHROW { add(1); }
@@ -124,15 +111,36 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
ut_ad(i < UT_ARR_SIZE(m_counter));
- m_counter[i] += n;
+ if (sizeof(Type) == 8) {
+ my_atomic_add64_explicit(
+ reinterpret_cast<int64*>(&m_counter[i]),
+ static_cast<int64>(n), MY_MEMORY_ORDER_RELAXED);
+ } else if (sizeof(Type) == 4) {
+ my_atomic_add32_explicit(
+ reinterpret_cast<int32*>(&m_counter[i]),
+ static_cast<int32>(n), MY_MEMORY_ORDER_RELAXED);
+ }
+ compile_time_assert(sizeof(Type) == 8 || sizeof(Type) == 4);
}
- /* @return total value - not 100% accurate, since it is not atomic. */
+ /* @return total value - not 100% accurate, since it is relaxed atomic. */
operator Type() const UNIV_NOTHROW {
Type total = 0;
for (size_t i = 0; i < N; ++i) {
- total += m_counter[m_policy.offset(i)];
+ if (sizeof(Type) == 8) {
+ total += static_cast<
+ Type>(my_atomic_load64_explicit(
+ reinterpret_cast<int64*>(const_cast<Type*>(
+ &m_counter[m_policy.offset(i)])),
+ MY_MEMORY_ORDER_RELAXED));
+ } else if (sizeof(Type) == 4) {
+ total += static_cast<
+ Type>(my_atomic_load32_explicit(
+ reinterpret_cast<int32*>(const_cast<Type*>(
+ &m_counter[m_policy.offset(i)])),
+ MY_MEMORY_ORDER_RELAXED));
+ }
}
return(total);
diff --git a/storage/xtradb/include/ut0counter.h b/storage/xtradb/include/ut0counter.h
index edc0db3b03d..6fde8a7a638 100644
--- a/storage/xtradb/include/ut0counter.h
+++ b/storage/xtradb/include/ut0counter.h
@@ -32,6 +32,7 @@ Created 2012/04/12 by Sunny Bains
#include <string.h>
#include "os0thread.h"
#include "os0sync.h"
+#include "my_atomic.h"
/** Default number of slots to use in ib_counter_t */
#define IB_N_SLOTS 64
@@ -81,8 +82,8 @@ struct thread_id_indexer_t : public generic_indexer_t<Type, N> {
}
};
-/** Class for using fuzzy counters. The counter is not protected by any
-mutex and the results are not guaranteed to be 100% accurate but close
+/** Class for using fuzzy counters. The counter is relaxed atomic
+so the results are not guaranteed to be 100% accurate but close
enough. Creates an array of counters and separates each element by the
CACHE_LINE_SIZE bytes */
template <
@@ -91,20 +92,6 @@ template <
template<typename, int> class Indexer = thread_id_indexer_t>
struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
{
-#ifdef UNIV_DEBUG
- ~ib_counter_t()
- {
- size_t n = (CACHE_LINE_SIZE / sizeof(Type));
-
- /* Check that we aren't writing outside our defined bounds. */
- for (size_t i = 0; i < UT_ARR_SIZE(m_counter); i += n) {
- for (size_t j = 1; j < n - 1; ++j) {
- ut_ad(m_counter[i + j] == 0);
- }
- }
- }
-#endif /* UNIV_DEBUG */
-
/** Increment the counter by 1. */
void inc() UNIV_NOTHROW { add(1); }
@@ -124,15 +111,36 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
ut_ad(i < UT_ARR_SIZE(m_counter));
- m_counter[i] += n;
+ if (sizeof(Type) == 8) {
+ my_atomic_add64_explicit(
+ reinterpret_cast<int64*>(&m_counter[i]),
+ static_cast<int64>(n), MY_MEMORY_ORDER_RELAXED);
+ } else if (sizeof(Type) == 4) {
+ my_atomic_add32_explicit(
+ reinterpret_cast<int32*>(&m_counter[i]),
+ static_cast<int32>(n), MY_MEMORY_ORDER_RELAXED);
+ }
+ compile_time_assert(sizeof(Type) == 8 || sizeof(Type) == 4);
}
- /* @return total value - not 100% accurate, since it is not atomic. */
+ /* @return total value - not 100% accurate, since it is relaxed atomic. */
operator Type() const UNIV_NOTHROW {
Type total = 0;
for (size_t i = 0; i < N; ++i) {
- total += m_counter[m_policy.offset(i)];
+ if (sizeof(Type) == 8) {
+ total += static_cast<
+ Type>(my_atomic_load64_explicit(
+ reinterpret_cast<int64*>(const_cast<Type*>(
+ &m_counter[m_policy.offset(i)])),
+ MY_MEMORY_ORDER_RELAXED));
+ } else if (sizeof(Type) == 4) {
+ total += static_cast<
+ Type>(my_atomic_load32_explicit(
+ reinterpret_cast<int32*>(const_cast<Type*>(
+ &m_counter[m_policy.offset(i)])),
+ MY_MEMORY_ORDER_RELAXED));
+ }
}
return(total);
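
For contrast, the removed plain increment (m_counter[i] += n) is a data race
under the C++ memory model whenever two threads land on the same slot, which
the thread-id indexer does not rule out. A standalone illustration of such a
race, hypothetical and not from this tree:

    // Sketch only: a deliberately racy plain increment, the pattern this
    // commit removes. Two threads do an unsynchronized read-modify-write on
    // the same variable; updates can be lost, and tools such as
    // ThreadSanitizer flag the access as a data race.
    #include <cstdint>
    #include <iostream>
    #include <thread>

    static std::uint64_t racy_total = 0;  // plain, non-atomic

    static void bump() {
        for (int i = 0; i < 1000000; ++i)
            racy_total += 1;  // separate load, add, store: not atomic
    }

    int main() {
        std::thread a(bump), b(bump);
        a.join();
        b.join();
        // Typically prints less than 2000000 because increments are lost.
        std::cout << racy_total << '\n';
        return 0;
    }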