author    Dmitry Vyukov <dvyukov@google.com>  2017-07-12 12:50:36 +0000
committer Dmitry Vyukov <dvyukov@google.com>  2017-07-12 12:50:36 +0000
commit    14a186f061650792da335633a278a409a5664ac1 (patch)
tree      1a9fae3e6210a61113bd5a0449c1993cd32ade57
parent    6e32365b3ee6d2661034d83a3e80eb2dea6da1a4 (diff)
download  compiler-rt-14a186f061650792da335633a278a409a5664ac1.tar.gz
tsan: refactor SyncClock code
1. Add SyncClock::ResetImpl, which removes the code duplication between the ctor and Reset.
2. Move SyncClock::Resize next to the other SyncClock methods; currently it is defined among the ThreadClock methods.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@307785 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/tsan/rtl/tsan_clock.cc | 108
-rw-r--r--  lib/tsan/rtl/tsan_clock.h  |   1
2 files changed, 54 insertions, 55 deletions
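
For context, a minimal sketch of the pattern this commit introduces (simplified member names and types, not the actual tsan declarations from tsan_clock.h): the constructor and Reset() now share their member-zeroing code through a private ResetImpl().

    // Sketch only: the ctor just zero-initializes; Reset() frees resources
    // first (elided here) and then reuses the same initialization path.
    class SyncClockSketch {
     public:
      SyncClockSketch() { ResetImpl(); }
      void Reset(/* ClockCache *c */) {
        // ... free any allocated clock blocks, as the real Reset() does ...
        ResetImpl();                          // shared default-state init
      }
     private:
      void ResetImpl() {                      // single place for the defaults
        tab_ = nullptr;
        tab_idx_ = 0;
        size_ = 0;
        release_store_tid_ = 0;               // kInvalidTid in the real code
        release_store_reused_ = 0;
        for (int i = 0; i < 2; i++)           // kDirtyTids in the real code
          dirty_tids_[i] = 0;
      }
      void *tab_;
      unsigned tab_idx_, size_;
      unsigned release_store_tid_, release_store_reused_;
      unsigned dirty_tids_[2];
    };
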
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
index 362921878..4a91683ea 100644
--- a/lib/tsan/rtl/tsan_clock.cc
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -300,53 +300,6 @@ bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
return true;
}
-void SyncClock::Resize(ClockCache *c, uptr nclk) {
- CPP_STAT_INC(StatClockReleaseResize);
- if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
- RoundUpTo(size_, ClockBlock::kClockCount)) {
- // Growing within the same block.
- // Memory is already allocated, just increase the size.
- size_ = nclk;
- return;
- }
- if (nclk <= ClockBlock::kClockCount) {
- // Grow from 0 to one-level table.
- CHECK_EQ(size_, 0);
- CHECK_EQ(tab_, 0);
- CHECK_EQ(tab_idx_, 0);
- size_ = nclk;
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- return;
- }
- // Growing two-level table.
- if (size_ == 0) {
- // Allocate first level table.
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- } else if (size_ <= ClockBlock::kClockCount) {
- // Transform one-level table to two-level table.
- u32 old = tab_idx_;
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- tab_->table[0] = old;
- }
- // At this point we have first level table allocated.
- // Add second level tables as necessary.
- for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
- i < nclk; i += ClockBlock::kClockCount) {
- u32 idx = ctx->clock_alloc.Alloc(c);
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- internal_memset(cb, 0, sizeof(*cb));
- CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
- tab_->table[i/ClockBlock::kClockCount] = idx;
- }
- size_ = nclk;
-}
-
// Sets a single element in the vector clock.
// This function is called only from weird places like AcquireGlobal.
void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
@@ -369,14 +322,8 @@ void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
tid_, reused_, last_acquire_);
}
-SyncClock::SyncClock()
- : release_store_tid_(kInvalidTid)
- , release_store_reused_()
- , tab_()
- , tab_idx_()
- , size_() {
- for (uptr i = 0; i < kDirtyTids; i++)
- dirty_tids_[i] = kInvalidTid;
+SyncClock::SyncClock() {
+ ResetImpl();
}
SyncClock::~SyncClock() {
@@ -398,6 +345,10 @@ void SyncClock::Reset(ClockCache *c) {
ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
ctx->clock_alloc.Free(c, tab_idx_);
}
+ ResetImpl();
+}
+
+void SyncClock::ResetImpl() {
tab_ = 0;
tab_idx_ = 0;
size_ = 0;
@@ -407,6 +358,53 @@ void SyncClock::Reset(ClockCache *c) {
dirty_tids_[i] = kInvalidTid;
}
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+ CPP_STAT_INC(StatClockReleaseResize);
+ if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
+ RoundUpTo(size_, ClockBlock::kClockCount)) {
+ // Growing within the same block.
+ // Memory is already allocated, just increase the size.
+ size_ = nclk;
+ return;
+ }
+ if (nclk <= ClockBlock::kClockCount) {
+ // Grow from 0 to one-level table.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+ size_ = nclk;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ return;
+ }
+ // Growing two-level table.
+ if (size_ == 0) {
+ // Allocate first level table.
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // Transform one-level table to two-level table.
+ u32 old = tab_idx_;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ tab_->table[0] = old;
+ }
+ // At this point we have first level table allocated.
+ // Add second level tables as necessary.
+ for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
+ i < nclk; i += ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ internal_memset(cb, 0, sizeof(*cb));
+ CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
+ tab_->table[i/ClockBlock::kClockCount] = idx;
+ }
+ size_ = nclk;
+}
+
ClockElem &SyncClock::elem(unsigned tid) const {
DCHECK_LT(tid, size_);
if (size_ <= ClockBlock::kClockCount)
diff --git a/lib/tsan/rtl/tsan_clock.h b/lib/tsan/rtl/tsan_clock.h
index 90eaca28a..378b550fd 100644
--- a/lib/tsan/rtl/tsan_clock.h
+++ b/lib/tsan/rtl/tsan_clock.h
@@ -74,6 +74,7 @@ class SyncClock {
u32 tab_idx_;
u32 size_;
+ void ResetImpl();
ClockElem &elem(unsigned tid) const;
};
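
For reference, a standalone sketch of the index arithmetic behind the one-/two-level table that Resize grows and elem() walks. The constant below is illustrative only; the real value is ClockBlock::kClockCount, and the real lookup goes through tab_ and tab_->table[].

    #include <cstdio>

    // Illustrative constant; the real one is ClockBlock::kClockCount.
    static const unsigned kClockCount = 128;

    // With size <= kClockCount the clock elements sit directly in the single
    // block; beyond that, the first-level block stores indices of second-level
    // blocks, and a tid is split into (block, offset) much like elem() does.
    static void locate(unsigned tid, unsigned size) {
      if (size <= kClockCount) {
        std::printf("tid %u -> one-level table, slot %u\n", tid, tid);
      } else {
        unsigned block = tid / kClockCount;   // index into the first-level table
        unsigned offset = tid % kClockCount;  // slot inside the second-level block
        std::printf("tid %u -> block %u, slot %u\n", tid, block, offset);
      }
    }

    int main() {
      locate(5, 100);     // fits in the one-level table
      locate(300, 1000);  // two-level layout: block 2, slot 44
      return 0;
    }
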