author      costan <costan@google.com>            2019-03-11 13:04:53 -0700
committer   Chris Mumford <cmumford@google.com>   2019-03-11 13:41:25 -0700
commit      7d8e41e49b8fddda66a2c5f0a6a47f1a916e8d26 (patch)
tree        0a5556aaf20ba27bd6ea0ab3792d1366e60b2f81 /util
parent      dd906262fd364c08a652dfa914f9995f6b7608a9 (diff)
download    leveldb-7d8e41e49b8fddda66a2c5f0a6a47f1a916e8d26.tar.gz
leveldb: Replace AtomicPointer with std::atomic.
This CL removes AtomicPointer from leveldb's port interface. Its usage is replaced with std::atomic<> from the C++11 standard library.
AtomicPointer was used to wrap flags, numbers, and pointers, so its instances are replaced with std::atomic<bool>, std::atomic<int>, std::atomic<size_t>, and std::atomic<Node*>.
This CL does not revise the memory ordering. AtomicPointer's methods are replaced mechanically with their std::atomic equivalents, even when the underlying usage is incorrect. (Example: DBImpl::has_imm_ is written using release stores, even though it is always read using relaxed ordering.) Revising the memory ordering is left for future CLs.
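As a rough illustration of that mechanical mapping (not part of this commit; the Node type and names below are hypothetical stand-ins), the old port methods correspond to std::atomic operations like so:

    #include <atomic>

    struct Node;             // Hypothetical pointee, e.g. a skiplist node.
    std::atomic<Node*> ptr;  // Replaces a port::AtomicPointer instance.

    void MechanicalMapping(Node* v) {
      ptr.store(v, std::memory_order_relaxed);        // was NoBarrier_Store(v)
      Node* a = ptr.load(std::memory_order_relaxed);  // was NoBarrier_Load()
      ptr.store(v, std::memory_order_release);        // was Release_Store(v)
      Node* b = ptr.load(std::memory_order_acquire);  // was Acquire_Load()
      (void)a;
      (void)b;
    }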
-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=237865146
Diffstat (limited to 'util')
-rw-r--r--   util/arena.cc       |  5
-rw-r--r--   util/arena.h        | 17
-rw-r--r--   util/env_test.cc    | 57
-rw-r--r--   util/env_windows.cc | 56
4 files changed, 67 insertions, 68 deletions
diff --git a/util/arena.cc b/util/arena.cc
index a0338bf..a496ad0 100644
--- a/util/arena.cc
+++ b/util/arena.cc
@@ -3,7 +3,6 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "util/arena.h"
-#include <assert.h>
 
 namespace leveldb {
 
@@ -60,8 +59,8 @@ char* Arena::AllocateAligned(size_t bytes) {
 char* Arena::AllocateNewBlock(size_t block_bytes) {
   char* result = new char[block_bytes];
   blocks_.push_back(result);
-  memory_usage_.NoBarrier_Store(
-      reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*)));
+  memory_usage_.fetch_add(block_bytes + sizeof(char*),
+                          std::memory_order_relaxed);
   return result;
 }
diff --git a/util/arena.h b/util/arena.h
index 48bab33..624e262 100644
--- a/util/arena.h
+++ b/util/arena.h
@@ -5,11 +5,11 @@
 #ifndef STORAGE_LEVELDB_UTIL_ARENA_H_
 #define STORAGE_LEVELDB_UTIL_ARENA_H_
 
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
 #include <vector>
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-#include "port/port.h"
 
 namespace leveldb {
 
@@ -21,13 +21,13 @@ class Arena {
   // Return a pointer to a newly allocated memory block of "bytes" bytes.
   char* Allocate(size_t bytes);
 
-  // Allocate memory with the normal alignment guarantees provided by malloc
+  // Allocate memory with the normal alignment guarantees provided by malloc.
   char* AllocateAligned(size_t bytes);
 
   // Returns an estimate of the total memory usage of data allocated
   // by the arena.
   size_t MemoryUsage() const {
-    return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load());
+    return memory_usage_.load(std::memory_order_relaxed);
   }
 
  private:
@@ -42,7 +42,10 @@ class Arena {
   std::vector<char*> blocks_;
 
   // Total memory usage of the arena.
-  port::AtomicPointer memory_usage_;
+  //
+  // TODO(costan): This member is accessed via atomics, but the others are
+  //               accessed without any locking. Is this OK?
+  std::atomic<size_t> memory_usage_;
 
   // No copying allowed
   Arena(const Arena&);
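The arena change is worth pausing on: the old code read MemoryUsage() and then stored the sum as two separate unsynchronized steps, while the new code performs a single atomic read-modify-write, so concurrent allocations cannot lose updates. A minimal sketch of the counter pattern (names assumed, not leveldb code):

    #include <atomic>
    #include <cstddef>

    class UsageCounter {
     public:
      // One atomic read-modify-write; concurrent Add() calls cannot lose
      // updates the way a separate load-then-store pair could.
      void Add(std::size_t n) { usage_.fetch_add(n, std::memory_order_relaxed); }

      // Relaxed ordering is enough: the value is only an approximate
      // statistic and does not order any other memory accesses.
      std::size_t Get() const { return usage_.load(std::memory_order_relaxed); }

     private:
      std::atomic<std::size_t> usage_{0};
    };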
diff --git a/util/env_test.cc b/util/env_test.cc
index 070109b..b204089 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -5,6 +5,7 @@
 #include "leveldb/env.h"
 
 #include <algorithm>
+#include <atomic>
 
 #include "port/port.h"
 #include "port/thread_annotations.h"
@@ -24,10 +25,15 @@ class EnvTest {
   EnvTest() : env_(Env::Default()) { }
 };
 
-static void SetBool(void* ptr) {
-  reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr);
+namespace {
+
+static void SetAtomicBool(void* atomic_bool_ptr) {
+  std::atomic<bool>* atomic_bool =
+      reinterpret_cast<std::atomic<bool>*>(atomic_bool_ptr);
+  atomic_bool->store(true, std::memory_order_relaxed);
 }
 
+}  // namespace
+
 TEST(EnvTest, ReadWrite) {
   Random rnd(test::RandomSeed());
 
@@ -77,42 +83,41 @@ TEST(EnvTest, ReadWrite) {
 }
 
 TEST(EnvTest, RunImmediately) {
-  port::AtomicPointer called(nullptr);
-  env_->Schedule(&SetBool, &called);
+  std::atomic<bool> called(false);
+  env_->Schedule(&SetAtomicBool, &called);
   env_->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(called.NoBarrier_Load() != nullptr);
+  ASSERT_TRUE(called.load(std::memory_order_relaxed));
 }
 
 TEST(EnvTest, RunMany) {
-  port::AtomicPointer last_id(nullptr);
+  std::atomic<int> last_id(0);
 
-  struct CB {
-    port::AtomicPointer* last_id_ptr;  // Pointer to shared slot
-    uintptr_t id;  // Order# for the execution of this callback
+  struct Callback {
+    std::atomic<int>* const last_id_ptr_;  // Pointer to shared state.
+    const int id_;  // Order# for the execution of this callback.
 
-    CB(port::AtomicPointer* p, int i) : last_id_ptr(p), id(i) { }
+    Callback(std::atomic<int>* last_id_ptr, int id)
+        : last_id_ptr_(last_id_ptr), id_(id) { }
 
-    static void Run(void* v) {
-      CB* cb = reinterpret_cast<CB*>(v);
-      void* cur = cb->last_id_ptr->NoBarrier_Load();
-      ASSERT_EQ(cb->id-1, reinterpret_cast<uintptr_t>(cur));
-      cb->last_id_ptr->Release_Store(reinterpret_cast<void*>(cb->id));
+    static void Run(void* arg) {
+      Callback* callback = reinterpret_cast<Callback*>(arg);
+      int current_id = callback->last_id_ptr_->load(std::memory_order_relaxed);
+      ASSERT_EQ(callback->id_ - 1, current_id);
+      callback->last_id_ptr_->store(callback->id_, std::memory_order_relaxed);
     }
   };
 
-  // Schedule in different order than start time
-  CB cb1(&last_id, 1);
-  CB cb2(&last_id, 2);
-  CB cb3(&last_id, 3);
-  CB cb4(&last_id, 4);
-  env_->Schedule(&CB::Run, &cb1);
-  env_->Schedule(&CB::Run, &cb2);
-  env_->Schedule(&CB::Run, &cb3);
-  env_->Schedule(&CB::Run, &cb4);
+  Callback callback1(&last_id, 1);
+  Callback callback2(&last_id, 2);
+  Callback callback3(&last_id, 3);
+  Callback callback4(&last_id, 4);
+  env_->Schedule(&Callback::Run, &callback1);
+  env_->Schedule(&Callback::Run, &callback2);
+  env_->Schedule(&Callback::Run, &callback3);
+  env_->Schedule(&Callback::Run, &callback4);
 
   env_->SleepForMicroseconds(kDelayMicros);
-  void* cur = last_id.Acquire_Load();
-  ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
+  ASSERT_EQ(4, last_id.load(std::memory_order_relaxed));
 }
 
 struct State {
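Because Env::Schedule() takes a plain void(*)(void*) callback, the tests pass the std::atomic through a void* argument. A self-contained sketch of that pattern, using std::thread in place of the Env thread pool (an assumption made for the example):

    #include <atomic>
    #include <cassert>
    #include <thread>

    static void SetAtomicBool(void* arg) {
      static_cast<std::atomic<bool>*>(arg)->store(true,
                                                  std::memory_order_relaxed);
    }

    int main() {
      std::atomic<bool> called(false);
      std::thread worker(SetAtomicBool, static_cast<void*>(&called));
      // join() establishes a happens-before edge, so the relaxed load below
      // observes the store even though neither access uses acquire/release.
      worker.join();
      assert(called.load(std::memory_order_relaxed));
      return 0;
    }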
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 57932bb..3b4496b 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -8,6 +8,7 @@
 #include <windows.h>
 
 #include <algorithm>
+#include <atomic>
 #include <chrono>
 #include <condition_variable>
 #include <deque>
@@ -105,51 +106,42 @@ class ScopedHandle {
 };
 
 // Helper class to limit resource usage to avoid exhaustion.
-// Currently used to limit mmap file usage so that we do not end
-// up running out virtual memory, or running into kernel performance
-// problems for very large databases.
+// Currently used to limit read-only file descriptors and mmap file usage
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
 class Limiter {
  public:
-  // Limit maximum number of resources to |n|.
-  Limiter(intptr_t n) { SetAllowed(n); }
+  // Limit maximum number of resources to |max_acquires|.
+  Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+  Limiter(const Limiter&) = delete;
+  Limiter operator=(const Limiter&) = delete;
 
   // If another resource is available, acquire it and return true.
   // Else return false.
-  bool Acquire() LOCKS_EXCLUDED(mu_) {
-    if (GetAllowed() <= 0) {
-      return false;
-    }
-    MutexLock l(&mu_);
-    intptr_t x = GetAllowed();
-    if (x <= 0) {
-      return false;
-    } else {
-      SetAllowed(x - 1);
+  bool Acquire() {
+    int old_acquires_allowed =
+        acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+    if (old_acquires_allowed > 0)
       return true;
-    }
+
+    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+    return false;
   }
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() LOCKS_EXCLUDED(mu_) {
-    MutexLock l(&mu_);
-    SetAllowed(GetAllowed() + 1);
+  void Release() {
+    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
   }
 
  private:
-  port::Mutex mu_;
-  port::AtomicPointer allowed_;
-
-  intptr_t GetAllowed() const {
-    return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
-  }
-
-  void SetAllowed(intptr_t v) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
-    allowed_.Release_Store(reinterpret_cast<void*>(v));
-  }
-
-  Limiter(const Limiter&);
-  void operator=(const Limiter&);
+  // The number of available resources.
+  //
+  // This is a counter and is not tied to the invariants of any other class, so
+  // it can be operated on safely using std::memory_order_relaxed.
+  std::atomic<int> acquires_allowed_;
 };
 
 class WindowsSequentialFile : public SequentialFile {
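The new Limiter drops the mutex entirely: Acquire() optimistically decrements the counter and, if it had already reached zero, adds the token back. A self-contained sketch of that decrement-then-roll-back pattern (class and member names assumed, not leveldb code):

    #include <atomic>

    class TokenLimiter {
     public:
      explicit TokenLimiter(int max_acquires) : tokens_(max_acquires) {}

      bool Acquire() {
        // Optimistically take a token with one atomic decrement.
        int old = tokens_.fetch_sub(1, std::memory_order_relaxed);
        if (old > 0) return true;
        // The pool was already exhausted; undo the decrement. The count can
        // dip below zero transiently under contention, but every failed
        // Acquire() restores what it took, so it never drifts.
        tokens_.fetch_add(1, std::memory_order_relaxed);
        return false;
      }

      void Release() { tokens_.fetch_add(1, std::memory_order_relaxed); }

     private:
      std::atomic<int> tokens_;
    };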