author     Victor Costan <costan@google.com>    2019-11-21 13:09:53 -0800
committer  Victor Costan <pwnall@chromium.org>  2019-11-21 13:11:40 -0800
commit     1c58902bdcc8d129f3883606bbd8e59085b48878 (patch)
tree       8ca631cdc3575fa2f33be0dc28b9d94d610202a1 /db
parent     2c9c80bd539ca5aad5ea864ee6dd81c1ee3eb91e (diff)
download   leveldb-1c58902bdcc8d129f3883606bbd8e59085b48878.tar.gz
Switch testing harness to googletest.
PiperOrigin-RevId: 281815695
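
The conversion below applies one pattern per file: test fixtures now inherit from testing::Test, TEST becomes TEST_F, test::TmpDir() + "/x" becomes testing::TempDir() + "x", ASSERT_OK becomes ASSERT_LEVELDB_OK (or EXPECT_LEVELDB_OK in constructors and value-returning helpers, where ASSERT_* cannot be used), and the custom leveldb::test::RunAllTests() driver is replaced by InitGoogleTest plus RUN_ALL_TESTS. As a reader aid only, not part of the commit, here is a minimal C++ sketch of what a converted test file looks like; the fixture name and assertions are illustrative and use stock googletest macros rather than leveldb's status macros:

#include <string>

#include "third_party/googletest/googletest/include/gtest/gtest.h"

namespace leveldb {

// Fixtures derive from testing::Test so TEST_F can construct them per test.
class ExampleTest : public testing::Test {
 public:
  // testing::TempDir() already ends with a path separator, so the
  // subdirectory name is appended without a leading '/'.
  ExampleTest() : dbname_(testing::TempDir() + "example_test") {}

  std::string dbname_;
};

// TEST(Fixture, Name) becomes TEST_F(Fixture, Name) now that a real
// fixture class exists.
TEST_F(ExampleTest, TempDirIsUsable) { ASSERT_FALSE(dbname_.empty()); }

}  // namespace leveldb

// The per-binary main() no longer calls leveldb::test::RunAllTests().
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
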
Diffstat (limited to 'db')
-rw-r--r--  db/autocompact_test.cc       25
-rw-r--r--  db/corruption_test.cc        72
-rw-r--r--  db/db_test.cc               418
-rw-r--r--  db/dbformat_test.cc          10
-rw-r--r--  db/fault_injection_test.cc   40
-rw-r--r--  db/filename_test.cc           9
-rw-r--r--  db/log_test.cc               87
-rw-r--r--  db/recovery_test.cc          66
-rw-r--r--  db/skiplist_test.cc          10
-rw-r--r--  db/version_edit_test.cc      10
-rw-r--r--  db/version_set_test.cc       38
-rw-r--r--  db/write_batch_test.cc       14
12 files changed, 418 insertions, 381 deletions
diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc
index e6c97a0..d4caf71 100644
--- a/db/autocompact_test.cc
+++ b/db/autocompact_test.cc
@@ -2,24 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "db/db_impl.h"
#include "leveldb/cache.h"
#include "leveldb/db.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
-class AutoCompactTest {
+class AutoCompactTest : public testing::Test {
public:
AutoCompactTest() {
- dbname_ = test::TmpDir() + "/autocompact_test";
+ dbname_ = testing::TempDir() + "autocompact_test";
tiny_cache_ = NewLRUCache(100);
options_.block_cache = tiny_cache_;
DestroyDB(dbname_, options_);
options_.create_if_missing = true;
options_.compression = kNoCompression;
- ASSERT_OK(DB::Open(options_, dbname_, &db_));
+ EXPECT_LEVELDB_OK(DB::Open(options_, dbname_, &db_));
}
~AutoCompactTest() {
@@ -62,15 +62,15 @@ void AutoCompactTest::DoReads(int n) {
// Fill database
for (int i = 0; i < kCount; i++) {
- ASSERT_OK(db_->Put(WriteOptions(), Key(i), value));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), Key(i), value));
}
- ASSERT_OK(dbi->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
// Delete everything
for (int i = 0; i < kCount; i++) {
- ASSERT_OK(db_->Delete(WriteOptions(), Key(i)));
+ ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), Key(i)));
}
- ASSERT_OK(dbi->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
// Get initial measurement of the space we will be reading.
const int64_t initial_size = Size(Key(0), Key(n));
@@ -103,10 +103,13 @@ void AutoCompactTest::DoReads(int n) {
ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
}
-TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
+TEST_F(AutoCompactTest, ReadAll) { DoReads(kCount); }
-TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
+TEST_F(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index 42f5237..4d20946 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -4,6 +4,7 @@
#include <sys/types.h>
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
@@ -13,14 +14,13 @@
#include "leveldb/table.h"
#include "leveldb/write_batch.h"
#include "util/logging.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
static const int kValueSize = 1000;
-class CorruptionTest {
+class CorruptionTest : public testing::Test {
public:
CorruptionTest()
: db_(nullptr),
@@ -46,12 +46,12 @@ class CorruptionTest {
return DB::Open(options_, dbname_, &db_);
}
- void Reopen() { ASSERT_OK(TryReopen()); }
+ void Reopen() { ASSERT_LEVELDB_OK(TryReopen()); }
void RepairDB() {
delete db_;
db_ = nullptr;
- ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
+ ASSERT_LEVELDB_OK(::leveldb::RepairDB(dbname_, options_));
}
void Build(int n) {
@@ -68,7 +68,7 @@ class CorruptionTest {
if (i == n - 1) {
options.sync = true;
}
- ASSERT_OK(db_->Write(options, &batch));
+ ASSERT_LEVELDB_OK(db_->Write(options, &batch));
}
}
@@ -112,7 +112,7 @@ class CorruptionTest {
void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
// Pick file to corrupt
std::vector<std::string> filenames;
- ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames));
+ ASSERT_LEVELDB_OK(env_.target()->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
std::string fname;
@@ -127,7 +127,7 @@ class CorruptionTest {
ASSERT_TRUE(!fname.empty()) << filetype;
uint64_t file_size;
- ASSERT_OK(env_.target()->GetFileSize(fname, &file_size));
+ ASSERT_LEVELDB_OK(env_.target()->GetFileSize(fname, &file_size));
if (offset < 0) {
// Relative to end of file; make it absolute
@@ -189,7 +189,7 @@ class CorruptionTest {
Cache* tiny_cache_;
};
-TEST(CorruptionTest, Recovery) {
+TEST_F(CorruptionTest, Recovery) {
Build(100);
Check(100, 100);
Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
@@ -200,13 +200,13 @@ TEST(CorruptionTest, Recovery) {
Check(36, 36);
}
-TEST(CorruptionTest, RecoverWriteError) {
+TEST_F(CorruptionTest, RecoverWriteError) {
env_.writable_file_error_ = true;
Status s = TryReopen();
ASSERT_TRUE(!s.ok());
}
-TEST(CorruptionTest, NewFileErrorDuringWrite) {
+TEST_F(CorruptionTest, NewFileErrorDuringWrite) {
// Do enough writing to force minor compaction
env_.writable_file_error_ = true;
const int num = 3 + (Options().write_buffer_size / kValueSize);
@@ -223,7 +223,7 @@ TEST(CorruptionTest, NewFileErrorDuringWrite) {
Reopen();
}
-TEST(CorruptionTest, TableFile) {
+TEST_F(CorruptionTest, TableFile) {
Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
@@ -234,7 +234,7 @@ TEST(CorruptionTest, TableFile) {
Check(90, 99);
}
-TEST(CorruptionTest, TableFileRepair) {
+TEST_F(CorruptionTest, TableFileRepair) {
options_.block_size = 2 * kValueSize; // Limit scope of corruption
options_.paranoid_checks = true;
Reopen();
@@ -250,7 +250,7 @@ TEST(CorruptionTest, TableFileRepair) {
Check(95, 99);
}
-TEST(CorruptionTest, TableFileIndexData) {
+TEST_F(CorruptionTest, TableFileIndexData) {
Build(10000); // Enough to build multiple Tables
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
@@ -260,36 +260,36 @@ TEST(CorruptionTest, TableFileIndexData) {
Check(5000, 9999);
}
-TEST(CorruptionTest, MissingDescriptor) {
+TEST_F(CorruptionTest, MissingDescriptor) {
Build(1000);
RepairDB();
Reopen();
Check(1000, 1000);
}
-TEST(CorruptionTest, SequenceNumberRecovery) {
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5"));
+TEST_F(CorruptionTest, SequenceNumberRecovery) {
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v3"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v4"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v5"));
RepairDB();
Reopen();
std::string v;
- ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v5", v);
// Write something. If sequence number was not recovered properly,
// it will be hidden by an earlier write.
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6"));
- ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v6"));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v6", v);
Reopen();
- ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v6", v);
}
-TEST(CorruptionTest, CorruptedDescriptor) {
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
+TEST_F(CorruptionTest, CorruptedDescriptor) {
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "hello"));
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
dbi->TEST_CompactRange(0, nullptr, nullptr);
@@ -301,11 +301,11 @@ TEST(CorruptionTest, CorruptedDescriptor) {
RepairDB();
Reopen();
std::string v;
- ASSERT_OK(db_->Get(ReadOptions(), "foo", &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("hello", v);
}
-TEST(CorruptionTest, CompactionInputError) {
+TEST_F(CorruptionTest, CompactionInputError) {
Build(10);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
@@ -320,7 +320,7 @@ TEST(CorruptionTest, CompactionInputError) {
Check(10000, 10000);
}
-TEST(CorruptionTest, CompactionInputErrorParanoid) {
+TEST_F(CorruptionTest, CompactionInputErrorParanoid) {
options_.paranoid_checks = true;
options_.write_buffer_size = 512 << 10;
Reopen();
@@ -341,22 +341,26 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
}
-TEST(CorruptionTest, UnrelatedKeys) {
+TEST_F(CorruptionTest, UnrelatedKeys) {
Build(10);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
Corrupt(kTableFile, 100, 1);
std::string tmp1, tmp2;
- ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
+ ASSERT_LEVELDB_OK(
+ db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
std::string v;
- ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
dbi->TEST_CompactMemTable();
- ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
+ ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
}
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/db/db_test.cc b/db/db_test.cc
index 9a8faf1..e8e3495 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -7,6 +7,7 @@
#include <atomic>
#include <string>
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
@@ -20,7 +21,6 @@
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
@@ -226,7 +226,7 @@ class SpecialEnv : public EnvWrapper {
}
};
-class DBTest {
+class DBTest : public testing::Test {
public:
std::string dbname_;
SpecialEnv* env_;
@@ -236,7 +236,7 @@ class DBTest {
DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
filter_policy_ = NewBloomFilterPolicy(10);
- dbname_ = test::TmpDir() + "/db_test";
+ dbname_ = testing::TempDir() + "db_test";
DestroyDB(dbname_, Options());
db_ = nullptr;
Reopen();
@@ -283,7 +283,9 @@ class DBTest {
DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
- void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
+ void Reopen(Options* options = nullptr) {
+ ASSERT_LEVELDB_OK(TryReopen(options));
+ }
void Close() {
delete db_;
@@ -294,7 +296,7 @@ class DBTest {
delete db_;
db_ = nullptr;
DestroyDB(dbname_, Options());
- ASSERT_OK(TryReopen(options));
+ ASSERT_LEVELDB_OK(TryReopen(options));
}
Status TryReopen(Options* options) {
@@ -348,11 +350,11 @@ class DBTest {
// Check reverse iteration results are the reverse of forward results
size_t matched = 0;
for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
- ASSERT_LT(matched, forward.size());
- ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
+ EXPECT_LT(matched, forward.size());
+ EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
matched++;
}
- ASSERT_EQ(matched, forward.size());
+ EXPECT_EQ(matched, forward.size());
delete iter;
return result;
@@ -402,7 +404,7 @@ class DBTest {
int NumTableFilesAtLevel(int level) {
std::string property;
- ASSERT_TRUE(db_->GetProperty(
+ EXPECT_TRUE(db_->GetProperty(
"leveldb.num-files-at-level" + NumberToString(level), &property));
return std::stoi(property);
}
@@ -497,12 +499,12 @@ class DBTest {
bool DeleteAnSSTFile() {
std::vector<std::string> filenames;
- ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+ EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
for (size_t i = 0; i < filenames.size(); i++) {
if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
- ASSERT_OK(env_->DeleteFile(TableFileName(dbname_, number)));
+ EXPECT_LEVELDB_OK(env_->DeleteFile(TableFileName(dbname_, number)));
return true;
}
}
@@ -512,7 +514,7 @@ class DBTest {
// Returns number of files renamed.
int RenameLDBToSST() {
std::vector<std::string> filenames;
- ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+ EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
int files_renamed = 0;
@@ -520,7 +522,7 @@ class DBTest {
if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
const std::string from = TableFileName(dbname_, number);
const std::string to = SSTTableFileName(dbname_, number);
- ASSERT_OK(env_->RenameFile(from, to));
+ EXPECT_LEVELDB_OK(env_->RenameFile(from, to));
files_renamed++;
}
}
@@ -535,63 +537,63 @@ class DBTest {
int option_config_;
};
-TEST(DBTest, Empty) {
+TEST_F(DBTest, Empty) {
do {
ASSERT_TRUE(db_ != nullptr);
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, EmptyKey) {
+TEST_F(DBTest, EmptyKey) {
do {
- ASSERT_OK(Put("", "v1"));
+ ASSERT_LEVELDB_OK(Put("", "v1"));
ASSERT_EQ("v1", Get(""));
- ASSERT_OK(Put("", "v2"));
+ ASSERT_LEVELDB_OK(Put("", "v2"));
ASSERT_EQ("v2", Get(""));
} while (ChangeOptions());
}
-TEST(DBTest, EmptyValue) {
+TEST_F(DBTest, EmptyValue) {
do {
- ASSERT_OK(Put("key", "v1"));
+ ASSERT_LEVELDB_OK(Put("key", "v1"));
ASSERT_EQ("v1", Get("key"));
- ASSERT_OK(Put("key", ""));
+ ASSERT_LEVELDB_OK(Put("key", ""));
ASSERT_EQ("", Get("key"));
- ASSERT_OK(Put("key", "v2"));
+ ASSERT_LEVELDB_OK(Put("key", "v2"));
ASSERT_EQ("v2", Get("key"));
} while (ChangeOptions());
}
-TEST(DBTest, ReadWrite) {
+TEST_F(DBTest, ReadWrite) {
do {
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
- ASSERT_OK(Put("bar", "v2"));
- ASSERT_OK(Put("foo", "v3"));
+ ASSERT_LEVELDB_OK(Put("bar", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v3"));
ASSERT_EQ("v3", Get("foo"));
ASSERT_EQ("v2", Get("bar"));
} while (ChangeOptions());
}
-TEST(DBTest, PutDeleteGet) {
+TEST_F(DBTest, PutDeleteGet) {
do {
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
ASSERT_EQ("v2", Get("foo"));
- ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
+ ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), "foo"));
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, GetFromImmutableLayer) {
+TEST_F(DBTest, GetFromImmutableLayer) {
do {
Options options = CurrentOptions();
options.env = env_;
options.write_buffer_size = 100000; // Small write buffer
Reopen(&options);
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
// Block sync calls.
@@ -604,17 +606,17 @@ TEST(DBTest, GetFromImmutableLayer) {
} while (ChangeOptions());
}
-TEST(DBTest, GetFromVersions) {
+TEST_F(DBTest, GetFromVersions) {
do {
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v1", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, GetMemUsage) {
+TEST_F(DBTest, GetMemUsage) {
do {
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
std::string val;
ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
int mem_usage = std::stoi(val);
@@ -623,14 +625,14 @@ TEST(DBTest, GetMemUsage) {
} while (ChangeOptions());
}
-TEST(DBTest, GetSnapshot) {
+TEST_F(DBTest, GetSnapshot) {
do {
// Try with both a short key and a long key
for (int i = 0; i < 2; i++) {
std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
- ASSERT_OK(Put(key, "v1"));
+ ASSERT_LEVELDB_OK(Put(key, "v1"));
const Snapshot* s1 = db_->GetSnapshot();
- ASSERT_OK(Put(key, "v2"));
+ ASSERT_LEVELDB_OK(Put(key, "v2"));
ASSERT_EQ("v2", Get(key));
ASSERT_EQ("v1", Get(key, s1));
dbfull()->TEST_CompactMemTable();
@@ -641,16 +643,16 @@ TEST(DBTest, GetSnapshot) {
} while (ChangeOptions());
}
-TEST(DBTest, GetIdenticalSnapshots) {
+TEST_F(DBTest, GetIdenticalSnapshots) {
do {
// Try with both a short key and a long key
for (int i = 0; i < 2; i++) {
std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
- ASSERT_OK(Put(key, "v1"));
+ ASSERT_LEVELDB_OK(Put(key, "v1"));
const Snapshot* s1 = db_->GetSnapshot();
const Snapshot* s2 = db_->GetSnapshot();
const Snapshot* s3 = db_->GetSnapshot();
- ASSERT_OK(Put(key, "v2"));
+ ASSERT_LEVELDB_OK(Put(key, "v2"));
ASSERT_EQ("v2", Get(key));
ASSERT_EQ("v1", Get(key, s1));
ASSERT_EQ("v1", Get(key, s2));
@@ -666,13 +668,13 @@ TEST(DBTest, GetIdenticalSnapshots) {
} while (ChangeOptions());
}
-TEST(DBTest, IterateOverEmptySnapshot) {
+TEST_F(DBTest, IterateOverEmptySnapshot) {
do {
const Snapshot* snapshot = db_->GetSnapshot();
ReadOptions read_options;
read_options.snapshot = snapshot;
- ASSERT_OK(Put("foo", "v1"));
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
Iterator* iterator1 = db_->NewIterator(read_options);
iterator1->SeekToFirst();
@@ -690,41 +692,41 @@ TEST(DBTest, IterateOverEmptySnapshot) {
} while (ChangeOptions());
}
-TEST(DBTest, GetLevel0Ordering) {
+TEST_F(DBTest, GetLevel0Ordering) {
do {
// Check that we process level-0 files in correct order. The code
// below generates two level-0 files where the earlier one comes
// before the later one in the level-0 file list since the earlier
// one has a smaller "smallest" key.
- ASSERT_OK(Put("bar", "b"));
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("bar", "b"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
dbfull()->TEST_CompactMemTable();
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v2", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, GetOrderedByLevels) {
+TEST_F(DBTest, GetOrderedByLevels) {
do {
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
Compact("a", "z");
ASSERT_EQ("v1", Get("foo"));
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
ASSERT_EQ("v2", Get("foo"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v2", Get("foo"));
} while (ChangeOptions());
}
-TEST(DBTest, GetPicksCorrectFile) {
+TEST_F(DBTest, GetPicksCorrectFile) {
do {
// Arrange to have multiple files in a non-level-0 level.
- ASSERT_OK(Put("a", "va"));
+ ASSERT_LEVELDB_OK(Put("a", "va"));
Compact("a", "b");
- ASSERT_OK(Put("x", "vx"));
+ ASSERT_LEVELDB_OK(Put("x", "vx"));
Compact("x", "y");
- ASSERT_OK(Put("f", "vf"));
+ ASSERT_LEVELDB_OK(Put("f", "vf"));
Compact("f", "g");
ASSERT_EQ("va", Get("a"));
ASSERT_EQ("vf", Get("f"));
@@ -732,7 +734,7 @@ TEST(DBTest, GetPicksCorrectFile) {
} while (ChangeOptions());
}
-TEST(DBTest, GetEncountersEmptyLevel) {
+TEST_F(DBTest, GetEncountersEmptyLevel) {
do {
// Arrange for the following to happen:
// * sstable A in level 0
@@ -770,7 +772,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
} while (ChangeOptions());
}
-TEST(DBTest, IterEmpty) {
+TEST_F(DBTest, IterEmpty) {
Iterator* iter = db_->NewIterator(ReadOptions());
iter->SeekToFirst();
@@ -785,8 +787,8 @@ TEST(DBTest, IterEmpty) {
delete iter;
}
-TEST(DBTest, IterSingle) {
- ASSERT_OK(Put("a", "va"));
+TEST_F(DBTest, IterSingle) {
+ ASSERT_LEVELDB_OK(Put("a", "va"));
Iterator* iter = db_->NewIterator(ReadOptions());
iter->SeekToFirst();
@@ -823,10 +825,10 @@ TEST(DBTest, IterSingle) {
delete iter;
}
-TEST(DBTest, IterMulti) {
- ASSERT_OK(Put("a", "va"));
- ASSERT_OK(Put("b", "vb"));
- ASSERT_OK(Put("c", "vc"));
+TEST_F(DBTest, IterMulti) {
+ ASSERT_LEVELDB_OK(Put("a", "va"));
+ ASSERT_LEVELDB_OK(Put("b", "vb"));
+ ASSERT_LEVELDB_OK(Put("c", "vc"));
Iterator* iter = db_->NewIterator(ReadOptions());
iter->SeekToFirst();
@@ -881,11 +883,11 @@ TEST(DBTest, IterMulti) {
ASSERT_EQ(IterStatus(iter), "b->vb");
// Make sure iter stays at snapshot
- ASSERT_OK(Put("a", "va2"));
- ASSERT_OK(Put("a2", "va3"));
- ASSERT_OK(Put("b", "vb2"));
- ASSERT_OK(Put("c", "vc2"));
- ASSERT_OK(Delete("b"));
+ ASSERT_LEVELDB_OK(Put("a", "va2"));
+ ASSERT_LEVELDB_OK(Put("a2", "va3"));
+ ASSERT_LEVELDB_OK(Put("b", "vb2"));
+ ASSERT_LEVELDB_OK(Put("c", "vc2"));
+ ASSERT_LEVELDB_OK(Delete("b"));
iter->SeekToFirst();
ASSERT_EQ(IterStatus(iter), "a->va");
iter->Next();
@@ -906,12 +908,12 @@ TEST(DBTest, IterMulti) {
delete iter;
}
-TEST(DBTest, IterSmallAndLargeMix) {
- ASSERT_OK(Put("a", "va"));
- ASSERT_OK(Put("b", std::string(100000, 'b')));
- ASSERT_OK(Put("c", "vc"));
- ASSERT_OK(Put("d", std::string(100000, 'd')));
- ASSERT_OK(Put("e", std::string(100000, 'e')));
+TEST_F(DBTest, IterSmallAndLargeMix) {
+ ASSERT_LEVELDB_OK(Put("a", "va"));
+ ASSERT_LEVELDB_OK(Put("b", std::string(100000, 'b')));
+ ASSERT_LEVELDB_OK(Put("c", "vc"));
+ ASSERT_LEVELDB_OK(Put("d", std::string(100000, 'd')));
+ ASSERT_LEVELDB_OK(Put("e", std::string(100000, 'e')));
Iterator* iter = db_->NewIterator(ReadOptions());
@@ -944,12 +946,12 @@ TEST(DBTest, IterSmallAndLargeMix) {
delete iter;
}
-TEST(DBTest, IterMultiWithDelete) {
+TEST_F(DBTest, IterMultiWithDelete) {
do {
- ASSERT_OK(Put("a", "va"));
- ASSERT_OK(Put("b", "vb"));
- ASSERT_OK(Put("c", "vc"));
- ASSERT_OK(Delete("b"));
+ ASSERT_LEVELDB_OK(Put("a", "va"));
+ ASSERT_LEVELDB_OK(Put("b", "vb"));
+ ASSERT_LEVELDB_OK(Put("c", "vc"));
+ ASSERT_LEVELDB_OK(Delete("b"));
ASSERT_EQ("NOT_FOUND", Get("b"));
Iterator* iter = db_->NewIterator(ReadOptions());
@@ -961,35 +963,35 @@ TEST(DBTest, IterMultiWithDelete) {
} while (ChangeOptions());
}
-TEST(DBTest, Recover) {
+TEST_F(DBTest, Recover) {
do {
- ASSERT_OK(Put("foo", "v1"));
- ASSERT_OK(Put("baz", "v5"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("baz", "v5"));
Reopen();
ASSERT_EQ("v1", Get("foo"));
ASSERT_EQ("v1", Get("foo"));
ASSERT_EQ("v5", Get("baz"));
- ASSERT_OK(Put("bar", "v2"));
- ASSERT_OK(Put("foo", "v3"));
+ ASSERT_LEVELDB_OK(Put("bar", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v3"));
Reopen();
ASSERT_EQ("v3", Get("foo"));
- ASSERT_OK(Put("foo", "v4"));
+ ASSERT_LEVELDB_OK(Put("foo", "v4"));
ASSERT_EQ("v4", Get("foo"));
ASSERT_EQ("v2", Get("bar"));
ASSERT_EQ("v5", Get("baz"));
} while (ChangeOptions());
}
-TEST(DBTest, RecoveryWithEmptyLog) {
+TEST_F(DBTest, RecoveryWithEmptyLog) {
do {
- ASSERT_OK(Put("foo", "v1"));
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
Reopen();
Reopen();
- ASSERT_OK(Put("foo", "v3"));
+ ASSERT_LEVELDB_OK(Put("foo", "v3"));
Reopen();
ASSERT_EQ("v3", Get("foo"));
} while (ChangeOptions());
@@ -997,7 +999,7 @@ TEST(DBTest, RecoveryWithEmptyLog) {
// Check that writes done during a memtable compaction are recovered
// if the database is shutdown during the memtable compaction.
-TEST(DBTest, RecoverDuringMemtableCompaction) {
+TEST_F(DBTest, RecoverDuringMemtableCompaction) {
do {
Options options = CurrentOptions();
options.env = env_;
@@ -1005,10 +1007,12 @@ TEST(DBTest, RecoverDuringMemtableCompaction) {
Reopen(&options);
// Trigger a long memtable compaction and reopen the database during it
- ASSERT_OK(Put("foo", "v1")); // Goes to 1st log file
- ASSERT_OK(Put("big1", std::string(10000000, 'x'))); // Fills memtable
- ASSERT_OK(Put("big2", std::string(1000, 'y'))); // Triggers compaction
- ASSERT_OK(Put("bar", "v2")); // Goes to new log file
+ ASSERT_LEVELDB_OK(Put("foo", "v1")); // Goes to 1st log file
+ ASSERT_LEVELDB_OK(
+ Put("big1", std::string(10000000, 'x'))); // Fills memtable
+ ASSERT_LEVELDB_OK(
+ Put("big2", std::string(1000, 'y'))); // Triggers compaction
+ ASSERT_LEVELDB_OK(Put("bar", "v2")); // Goes to new log file
Reopen(&options);
ASSERT_EQ("v1", Get("foo"));
@@ -1024,7 +1028,7 @@ static std::string Key(int i) {
return std::string(buf);
}
-TEST(DBTest, MinorCompactionsHappen) {
+TEST_F(DBTest, MinorCompactionsHappen) {
Options options = CurrentOptions();
options.write_buffer_size = 10000;
Reopen(&options);
@@ -1033,7 +1037,7 @@ TEST(DBTest, MinorCompactionsHappen) {
int starting_num_tables = TotalTableFiles();
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
+ ASSERT_LEVELDB_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
}
int ending_num_tables = TotalTableFiles();
ASSERT_GT(ending_num_tables, starting_num_tables);
@@ -1049,14 +1053,14 @@ TEST(DBTest, MinorCompactionsHappen) {
}
}
-TEST(DBTest, RecoverWithLargeLog) {
+TEST_F(DBTest, RecoverWithLargeLog) {
{
Options options = CurrentOptions();
Reopen(&options);
- ASSERT_OK(Put("big1", std::string(200000, '1')));
- ASSERT_OK(Put("big2", std::string(200000, '2')));
- ASSERT_OK(Put("small3", std::string(10, '3')));
- ASSERT_OK(Put("small4", std::string(10, '4')));
+ ASSERT_LEVELDB_OK(Put("big1", std::string(200000, '1')));
+ ASSERT_LEVELDB_OK(Put("big2", std::string(200000, '2')));
+ ASSERT_LEVELDB_OK(Put("small3", std::string(10, '3')));
+ ASSERT_LEVELDB_OK(Put("small4", std::string(10, '4')));
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
}
@@ -1073,7 +1077,7 @@ TEST(DBTest, RecoverWithLargeLog) {
ASSERT_GT(NumTableFilesAtLevel(0), 1);
}
-TEST(DBTest, CompactionsGenerateMultipleFiles) {
+TEST_F(DBTest, CompactionsGenerateMultipleFiles) {
Options options = CurrentOptions();
options.write_buffer_size = 100000000; // Large write buffer
Reopen(&options);
@@ -1085,7 +1089,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
std::vector<std::string> values;
for (int i = 0; i < 80; i++) {
values.push_back(RandomString(&rnd, 100000));
- ASSERT_OK(Put(Key(i), values[i]));
+ ASSERT_LEVELDB_OK(Put(Key(i), values[i]));
}
// Reopening moves updates to level-0
@@ -1099,7 +1103,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) {
}
}
-TEST(DBTest, RepeatedWritesToSameKey) {
+TEST_F(DBTest, RepeatedWritesToSameKey) {
Options options = CurrentOptions();
options.env = env_;
options.write_buffer_size = 100000; // Small write buffer
@@ -1118,7 +1122,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
}
}
-TEST(DBTest, SparseMerge) {
+TEST_F(DBTest, SparseMerge) {
Options options = CurrentOptions();
options.compression = kNoCompression;
Reopen(&options);
@@ -1168,7 +1172,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
return result;
}
-TEST(DBTest, ApproximateSizes) {
+TEST_F(DBTest, ApproximateSizes) {
do {
Options options = CurrentOptions();
options.write_buffer_size = 100000000; // Large write buffer
@@ -1186,7 +1190,7 @@ TEST(DBTest, ApproximateSizes) {
static const int S2 = 105000; // Allow some expansion from metadata
Random rnd(301);
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
+ ASSERT_LEVELDB_OK(Put(Key(i), RandomString(&rnd, S1)));
}
// 0 because GetApproximateSizes() does not account for memtable space
@@ -1227,7 +1231,7 @@ TEST(DBTest, ApproximateSizes) {
} while (ChangeOptions());
}
-TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
+TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
do {
Options options = CurrentOptions();
options.compression = kNoCompression;
@@ -1235,18 +1239,18 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
Random rnd(301);
std::string big1 = RandomString(&rnd, 100000);
- ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(Key(2), big1));
- ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(Key(4), big1));
- ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
- ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
- ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(0), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(1), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(2), big1));
+ ASSERT_LEVELDB_OK(Put(Key(3), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(4), big1));
+ ASSERT_LEVELDB_OK(Put(Key(5), RandomString(&rnd, 10000)));
+ ASSERT_LEVELDB_OK(Put(Key(6), RandomString(&rnd, 300000)));
+ ASSERT_LEVELDB_OK(Put(Key(7), RandomString(&rnd, 10000)));
if (options.reuse_logs) {
// Need to force a memtable compaction since recovery does not do so.
- ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
}
// Check sizes across recovery by reopening a few times
@@ -1270,7 +1274,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
} while (ChangeOptions());
}
-TEST(DBTest, IteratorPinsRef) {
+TEST_F(DBTest, IteratorPinsRef) {
Put("foo", "hello");
// Get iterator that will yield the current contents of the DB.
@@ -1279,7 +1283,8 @@ TEST(DBTest, IteratorPinsRef) {
// Write to force compactions
Put("foo", "newvalue1");
for (int i = 0; i < 100; i++) {
- ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
+ ASSERT_LEVELDB_OK(
+ Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
}
Put("foo", "newvalue2");
@@ -1292,7 +1297,7 @@ TEST(DBTest, IteratorPinsRef) {
delete iter;
}
-TEST(DBTest, Snapshot) {
+TEST_F(DBTest, Snapshot) {
do {
Put("foo", "v1");
const Snapshot* s1 = db_->GetSnapshot();
@@ -1321,7 +1326,7 @@ TEST(DBTest, Snapshot) {
} while (ChangeOptions());
}
-TEST(DBTest, HiddenValuesAreRemoved) {
+TEST_F(DBTest, HiddenValuesAreRemoved) {
do {
Random rnd(301);
FillLevels("a", "z");
@@ -1333,7 +1338,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
Put("foo", "tiny");
Put("pastfoo2", "v2"); // Advance sequence number one more
- ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
ASSERT_GT(NumTableFilesAtLevel(0), 0);
ASSERT_EQ(big, Get("foo", snapshot));
@@ -1352,9 +1357,9 @@ TEST(DBTest, HiddenValuesAreRemoved) {
} while (ChangeOptions());
}
-TEST(DBTest, DeletionMarkers1) {
+TEST_F(DBTest, DeletionMarkers1) {
Put("foo", "v1");
- ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
@@ -1368,7 +1373,7 @@ TEST(DBTest, DeletionMarkers1) {
Delete("foo");
Put("foo", "v2");
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
- ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
Slice z("z");
dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
@@ -1381,9 +1386,9 @@ TEST(DBTest, DeletionMarkers1) {
ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
-TEST(DBTest, DeletionMarkers2) {
+TEST_F(DBTest, DeletionMarkers2) {
Put("foo", "v1");
- ASSERT_OK(dbfull()->TEST_CompactMemTable());
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
@@ -1396,7 +1401,7 @@ TEST(DBTest, DeletionMarkers2) {
Delete("foo");
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
+ ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
// DEL kept: "last" file overlaps
@@ -1407,17 +1412,17 @@ TEST(DBTest, DeletionMarkers2) {
ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
}
-TEST(DBTest, OverlapInLevel0) {
+TEST_F(DBTest, OverlapInLevel0) {
do {
ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
// Fill levels 1 and 2 to disable the pushing of new memtables to levels >
// 0.
- ASSERT_OK(Put("100", "v100"));
- ASSERT_OK(Put("999", "v999"));
+ ASSERT_LEVELDB_OK(Put("100", "v100"));
+ ASSERT_LEVELDB_OK(Put("999", "v999"));
dbfull()->TEST_CompactMemTable();
- ASSERT_OK(Delete("100"));
- ASSERT_OK(Delete("999"));
+ ASSERT_LEVELDB_OK(Delete("100"));
+ ASSERT_LEVELDB_OK(Delete("999"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("0,1,1", FilesPerLevel());
@@ -1425,12 +1430,12 @@ TEST(DBTest, OverlapInLevel0) {
// files[0] 200 .. 900
// files[1] 300 .. 500
// Note that files are sorted by smallest key.
- ASSERT_OK(Put("300", "v300"));
- ASSERT_OK(Put("500", "v500"));
+ ASSERT_LEVELDB_OK(Put("300", "v300"));
+ ASSERT_LEVELDB_OK(Put("500", "v500"));
dbfull()->TEST_CompactMemTable();
- ASSERT_OK(Put("200", "v200"));
- ASSERT_OK(Put("600", "v600"));
- ASSERT_OK(Put("900", "v900"));
+ ASSERT_LEVELDB_OK(Put("200", "v200"));
+ ASSERT_LEVELDB_OK(Put("600", "v600"));
+ ASSERT_LEVELDB_OK(Put("900", "v900"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("2,1,1", FilesPerLevel());
@@ -1442,23 +1447,23 @@ TEST(DBTest, OverlapInLevel0) {
// Do a memtable compaction. Before bug-fix, the compaction would
// not detect the overlap with level-0 files and would incorrectly place
// the deletion in a deeper level.
- ASSERT_OK(Delete("600"));
+ ASSERT_LEVELDB_OK(Delete("600"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("3", FilesPerLevel());
ASSERT_EQ("NOT_FOUND", Get("600"));
} while (ChangeOptions());
}
-TEST(DBTest, L0_CompactionBug_Issue44_a) {
+TEST_F(DBTest, L0_CompactionBug_Issue44_a) {
Reopen();
- ASSERT_OK(Put("b", "v"));
+ ASSERT_LEVELDB_OK(Put("b", "v"));
Reopen();
- ASSERT_OK(Delete("b"));
- ASSERT_OK(Delete("a"));
+ ASSERT_LEVELDB_OK(Delete("b"));
+ ASSERT_LEVELDB_OK(Delete("a"));
Reopen();
- ASSERT_OK(Delete("a"));
+ ASSERT_LEVELDB_OK(Delete("a"));
Reopen();
- ASSERT_OK(Put("a", "v"));
+ ASSERT_LEVELDB_OK(Put("a", "v"));
Reopen();
Reopen();
ASSERT_EQ("(a->v)", Contents());
@@ -1466,7 +1471,7 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
ASSERT_EQ("(a->v)", Contents());
}
-TEST(DBTest, L0_CompactionBug_Issue44_b) {
+TEST_F(DBTest, L0_CompactionBug_Issue44_b) {
Reopen();
Put("", "");
Reopen();
@@ -1492,16 +1497,16 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) {
ASSERT_EQ("(->)(c->cv)", Contents());
}
-TEST(DBTest, Fflush_Issue474) {
+TEST_F(DBTest, Fflush_Issue474) {
static const int kNum = 100000;
Random rnd(test::RandomSeed());
for (int i = 0; i < kNum; i++) {
fflush(nullptr);
- ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
+ ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
}
}
-TEST(DBTest, ComparatorCheck) {
+TEST_F(DBTest, ComparatorCheck) {
class NewComparator : public Comparator {
public:
const char* Name() const override { return "leveldb.NewComparator"; }
@@ -1524,7 +1529,7 @@ TEST(DBTest, ComparatorCheck) {
<< s.ToString();
}
-TEST(DBTest, CustomComparator) {
+TEST_F(DBTest, CustomComparator) {
class NumberComparator : public Comparator {
public:
const char* Name() const override { return "test.NumberComparator"; }
@@ -1542,11 +1547,11 @@ TEST(DBTest, CustomComparator) {
private:
static int ToNumber(const Slice& x) {
// Check that there are no extra characters.
- ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
+ EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
<< EscapeString(x);
int val;
char ignored;
- ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
+ EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
<< EscapeString(x);
return val;
}
@@ -1558,8 +1563,8 @@ TEST(DBTest, CustomComparator) {
new_options.filter_policy = nullptr; // Cannot use bloom filters
new_options.write_buffer_size = 1000; // Compact more often
DestroyAndReopen(&new_options);
- ASSERT_OK(Put("[10]", "ten"));
- ASSERT_OK(Put("[0x14]", "twenty"));
+ ASSERT_LEVELDB_OK(Put("[10]", "ten"));
+ ASSERT_LEVELDB_OK(Put("[0x14]", "twenty"));
for (int i = 0; i < 2; i++) {
ASSERT_EQ("ten", Get("[10]"));
ASSERT_EQ("ten", Get("[0xa]"));
@@ -1574,13 +1579,13 @@ TEST(DBTest, CustomComparator) {
for (int i = 0; i < 1000; i++) {
char buf[100];
snprintf(buf, sizeof(buf), "[%d]", i * 10);
- ASSERT_OK(Put(buf, buf));
+ ASSERT_LEVELDB_OK(Put(buf, buf));
}
Compact("[0]", "[1000000]");
}
}
-TEST(DBTest, ManualCompaction) {
+TEST_F(DBTest, ManualCompaction) {
ASSERT_EQ(config::kMaxMemCompactLevel, 2)
<< "Need to update this test to match kMaxMemCompactLevel";
@@ -1614,8 +1619,8 @@ TEST(DBTest, ManualCompaction) {
ASSERT_EQ("0,0,1", FilesPerLevel());
}
-TEST(DBTest, DBOpen_Options) {
- std::string dbname = test::TmpDir() + "/db_options_test";
+TEST_F(DBTest, DBOpen_Options) {
+ std::string dbname = testing::TempDir() + "db_options_test";
DestroyDB(dbname, Options());
// Does not exist, and create_if_missing == false: error
@@ -1629,7 +1634,7 @@ TEST(DBTest, DBOpen_Options) {
// Does not exist, and create_if_missing == true: OK
opts.create_if_missing = true;
s = DB::Open(opts, dbname, &db);
- ASSERT_OK(s);
+ ASSERT_LEVELDB_OK(s);
ASSERT_TRUE(db != nullptr);
delete db;
@@ -1646,15 +1651,15 @@ TEST(DBTest, DBOpen_Options) {
opts.create_if_missing = true;
opts.error_if_exists = false;
s = DB::Open(opts, dbname, &db);
- ASSERT_OK(s);
+ ASSERT_LEVELDB_OK(s);
ASSERT_TRUE(db != nullptr);
delete db;
db = nullptr;
}
-TEST(DBTest, DestroyEmptyDir) {
- std::string dbname = test::TmpDir() + "/db_empty_dir";
+TEST_F(DBTest, DestroyEmptyDir) {
+ std::string dbname = testing::TempDir() + "db_empty_dir";
TestEnv env(Env::Default());
env.DeleteDir(dbname);
ASSERT_TRUE(!env.FileExists(dbname));
@@ -1662,34 +1667,34 @@ TEST(DBTest, DestroyEmptyDir) {
Options opts;
opts.env = &env;
- ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_LEVELDB_OK(env.CreateDir(dbname));
ASSERT_TRUE(env.FileExists(dbname));
std::vector<std::string> children;
- ASSERT_OK(env.GetChildren(dbname, &children));
+ ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
// The stock Env's do not filter out '.' and '..' special files.
ASSERT_EQ(2, children.size());
- ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
ASSERT_TRUE(!env.FileExists(dbname));
// Should also be destroyed if Env is filtering out dot files.
env.SetIgnoreDotFiles(true);
- ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_LEVELDB_OK(env.CreateDir(dbname));
ASSERT_TRUE(env.FileExists(dbname));
- ASSERT_OK(env.GetChildren(dbname, &children));
+ ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
ASSERT_EQ(0, children.size());
- ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
ASSERT_TRUE(!env.FileExists(dbname));
}
-TEST(DBTest, DestroyOpenDB) {
- std::string dbname = test::TmpDir() + "/open_db_dir";
+TEST_F(DBTest, DestroyOpenDB) {
+ std::string dbname = testing::TempDir() + "open_db_dir";
env_->DeleteDir(dbname);
ASSERT_TRUE(!env_->FileExists(dbname));
Options opts;
opts.create_if_missing = true;
DB* db = nullptr;
- ASSERT_OK(DB::Open(opts, dbname, &db));
+ ASSERT_LEVELDB_OK(DB::Open(opts, dbname, &db));
ASSERT_TRUE(db != nullptr);
// Must fail to destroy an open db.
@@ -1701,23 +1706,23 @@ TEST(DBTest, DestroyOpenDB) {
db = nullptr;
// Should succeed destroying a closed db.
- ASSERT_OK(DestroyDB(dbname, Options()));
+ ASSERT_LEVELDB_OK(DestroyDB(dbname, Options()));
ASSERT_TRUE(!env_->FileExists(dbname));
}
-TEST(DBTest, Locking) {
+TEST_F(DBTest, Locking) {
DB* db2 = nullptr;
Status s = DB::Open(CurrentOptions(), dbname_, &db2);
ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
// Check that number of files does not grow when we are out of space
-TEST(DBTest, NoSpace) {
+TEST_F(DBTest, NoSpace) {
Options options = CurrentOptions();
options.env = env_;
Reopen(&options);
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
Compact("a", "z");
const int num_files = CountFiles();
@@ -1732,12 +1737,12 @@ TEST(DBTest, NoSpace) {
ASSERT_LT(CountFiles(), num_files + 3);
}
-TEST(DBTest, NonWritableFileSystem) {
+TEST_F(DBTest, NonWritableFileSystem) {
Options options = CurrentOptions();
options.write_buffer_size = 1000;
options.env = env_;
Reopen(&options);
- ASSERT_OK(Put("foo", "v1"));
+ ASSERT_LEVELDB_OK(Put("foo", "v1"));
// Force errors for new files.
env_->non_writable_.store(true, std::memory_order_release);
std::string big(100000, 'x');
@@ -1753,7 +1758,7 @@ TEST(DBTest, NonWritableFileSystem) {
env_->non_writable_.store(false, std::memory_order_release);
}
-TEST(DBTest, WriteSyncError) {
+TEST_F(DBTest, WriteSyncError) {
// Check that log sync errors cause the DB to disallow future writes.
// (a) Cause log sync calls to fail
@@ -1764,7 +1769,7 @@ TEST(DBTest, WriteSyncError) {
// (b) Normal write should succeed
WriteOptions w;
- ASSERT_OK(db_->Put(w, "k1", "v1"));
+ ASSERT_LEVELDB_OK(db_->Put(w, "k1", "v1"));
ASSERT_EQ("v1", Get("k1"));
// (c) Do a sync write; should fail
@@ -1784,7 +1789,7 @@ TEST(DBTest, WriteSyncError) {
ASSERT_EQ("NOT_FOUND", Get("k3"));
}
-TEST(DBTest, ManifestWriteError) {
+TEST_F(DBTest, ManifestWriteError) {
// Test for the following problem:
// (a) Compaction produces file F
// (b) Log record containing F is written to MANIFEST file, but Sync() fails
@@ -1803,7 +1808,7 @@ TEST(DBTest, ManifestWriteError) {
options.create_if_missing = true;
options.error_if_exists = false;
DestroyAndReopen(&options);
- ASSERT_OK(Put("foo", "bar"));
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
// Memtable compaction (will succeed)
@@ -1824,8 +1829,8 @@ TEST(DBTest, ManifestWriteError) {
}
}
-TEST(DBTest, MissingSSTFile) {
- ASSERT_OK(Put("foo", "bar"));
+TEST_F(DBTest, MissingSSTFile) {
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
// Dump the memtable to disk.
@@ -1841,8 +1846,8 @@ TEST(DBTest, MissingSSTFile) {
ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
}
-TEST(DBTest, StillReadSST) {
- ASSERT_OK(Put("foo", "bar"));
+TEST_F(DBTest, StillReadSST) {
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
// Dump the memtable to disk.
@@ -1857,18 +1862,18 @@ TEST(DBTest, StillReadSST) {
ASSERT_EQ("bar", Get("foo"));
}
-TEST(DBTest, FilesDeletedAfterCompaction) {
- ASSERT_OK(Put("foo", "v2"));
+TEST_F(DBTest, FilesDeletedAfterCompaction) {
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
Compact("a", "z");
const int num_files = CountFiles();
for (int i = 0; i < 10; i++) {
- ASSERT_OK(Put("foo", "v2"));
+ ASSERT_LEVELDB_OK(Put("foo", "v2"));
Compact("a", "z");
}
ASSERT_EQ(CountFiles(), num_files);
}
-TEST(DBTest, BloomFilter) {
+TEST_F(DBTest, BloomFilter) {
env_->count_random_reads_ = true;
Options options = CurrentOptions();
options.env = env_;
@@ -1879,11 +1884,11 @@ TEST(DBTest, BloomFilter) {
// Populate multiple layers
const int N = 10000;
for (int i = 0; i < N; i++) {
- ASSERT_OK(Put(Key(i), Key(i)));
+ ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
}
Compact("a", "z");
for (int i = 0; i < N; i += 100) {
- ASSERT_OK(Put(Key(i), Key(i)));
+ ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
}
dbfull()->TEST_CompactMemTable();
@@ -1955,7 +1960,7 @@ static void MTThreadBody(void* arg) {
// We add some padding for force compactions.
snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
static_cast<int>(counter));
- ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
+ ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
} else {
// Read a value and verify that it matches the pattern written above.
Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
@@ -1963,7 +1968,7 @@ static void MTThreadBody(void* arg) {
// Key has not yet been written
} else {
// Check that the writer thread counter is >= the counter in the value
- ASSERT_OK(s);
+ ASSERT_LEVELDB_OK(s);
int k, w, c;
ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
ASSERT_EQ(k, key);
@@ -1980,7 +1985,7 @@ static void MTThreadBody(void* arg) {
} // namespace
-TEST(DBTest, MultiThreaded) {
+TEST_F(DBTest, MultiThreaded) {
do {
// Initialize state
MTState mt;
@@ -2158,7 +2163,7 @@ static bool CompareIterators(int step, DB* model, DB* db,
return ok;
}
-TEST(DBTest, Randomized) {
+TEST_F(DBTest, Randomized) {
Random rnd(test::RandomSeed());
do {
ModelDB model(CurrentOptions());
@@ -2176,13 +2181,13 @@ TEST(DBTest, Randomized) {
k = RandomKey(&rnd);
v = RandomString(
&rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
- ASSERT_OK(model.Put(WriteOptions(), k, v));
- ASSERT_OK(db_->Put(WriteOptions(), k, v));
+ ASSERT_LEVELDB_OK(model.Put(WriteOptions(), k, v));
+ ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), k, v));
} else if (p < 90) { // Delete
k = RandomKey(&rnd);
- ASSERT_OK(model.Delete(WriteOptions(), k));
- ASSERT_OK(db_->Delete(WriteOptions(), k));
+ ASSERT_LEVELDB_OK(model.Delete(WriteOptions(), k));
+ ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), k));
} else { // Multi-element batch
WriteBatch b;
@@ -2201,8 +2206,8 @@ TEST(DBTest, Randomized) {
b.Delete(k);
}
}
- ASSERT_OK(model.Write(WriteOptions(), &b));
- ASSERT_OK(db_->Write(WriteOptions(), &b));
+ ASSERT_LEVELDB_OK(model.Write(WriteOptions(), &b));
+ ASSERT_LEVELDB_OK(db_->Write(WriteOptions(), &b));
}
if ((step % 100) == 0) {
@@ -2233,14 +2238,14 @@ std::string MakeKey(unsigned int num) {
}
void BM_LogAndApply(int iters, int num_base_files) {
- std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
+ std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
DestroyDB(dbname, Options());
DB* db = nullptr;
Options opts;
opts.create_if_missing = true;
Status s = DB::Open(opts, dbname, &db);
- ASSERT_OK(s);
+ ASSERT_LEVELDB_OK(s);
ASSERT_TRUE(db != nullptr);
delete db;
@@ -2255,7 +2260,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
Options options;
VersionSet vset(dbname, &options, nullptr, &cmp);
bool save_manifest;
- ASSERT_OK(vset.Recover(&save_manifest));
+ ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
VersionEdit vbase;
uint64_t fnum = 1;
for (int i = 0; i < num_base_files; i++) {
@@ -2263,7 +2268,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
}
- ASSERT_OK(vset.LogAndApply(&vbase, &mu));
+ ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));
uint64_t start_micros = env->NowMicros();
@@ -2295,5 +2300,6 @@ int main(int argc, char** argv) {
return 0;
}
- return leveldb::test::RunAllTests();
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
}
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index 1209369..ca49e0a 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -3,8 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/dbformat.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "util/logging.h"
-#include "util/testharness.h"
namespace leveldb {
@@ -41,8 +42,6 @@ static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}
-class FormatTest {};
-
TEST(FormatTest, InternalKey_EncodeDecode) {
const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
const uint64_t seq[] = {1,
@@ -128,4 +127,7 @@ TEST(FormatTest, InternalKeyDebugString) {
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 5b31bb8..80b8f12 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -9,6 +9,7 @@
#include <map>
#include <set>
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
@@ -22,7 +23,6 @@
#include "port/thread_annotations.h"
#include "util/logging.h"
#include "util/mutexlock.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
@@ -300,7 +300,7 @@ void FaultInjectionTestEnv::UntrackFile(const std::string& f) {
Status FaultInjectionTestEnv::DeleteFile(const std::string& f) {
Status s = EnvWrapper::DeleteFile(f);
- ASSERT_OK(s);
+ EXPECT_LEVELDB_OK(s);
if (s.ok()) {
UntrackFile(f);
}
@@ -361,7 +361,7 @@ Status FileState::DropUnsyncedData() const {
return Truncate(filename_, sync_pos);
}
-class FaultInjectionTest {
+class FaultInjectionTest : public testing::Test {
public:
enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR };
enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES };
@@ -376,7 +376,7 @@ class FaultInjectionTest {
: env_(new FaultInjectionTestEnv),
tiny_cache_(NewLRUCache(100)),
db_(nullptr) {
- dbname_ = test::TmpDir() + "/fault_test";
+ dbname_ = testing::TempDir() + "fault_test";
DestroyDB(dbname_, Options()); // Destroy any db from earlier run
options_.reuse_logs = true;
options_.env = env_;
@@ -402,7 +402,7 @@ class FaultInjectionTest {
batch.Clear();
batch.Put(key, Value(i, &value_space));
WriteOptions options;
- ASSERT_OK(db_->Write(options, &batch));
+ ASSERT_LEVELDB_OK(db_->Write(options, &batch));
}
}
@@ -424,7 +424,7 @@ class FaultInjectionTest {
s = ReadValue(i, &val);
if (expected == VAL_EXPECT_NO_ERROR) {
if (s.ok()) {
- ASSERT_EQ(value_space, val);
+ EXPECT_EQ(value_space, val);
}
} else if (s.ok()) {
fprintf(stderr, "Expected an error at %d, but was OK\n", i);
@@ -465,7 +465,7 @@ class FaultInjectionTest {
void DeleteAllData() {
Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
- ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
+ ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), iter->key()));
}
delete iter;
@@ -474,10 +474,10 @@ class FaultInjectionTest {
void ResetDBState(ResetMethod reset_method) {
switch (reset_method) {
case RESET_DROP_UNSYNCED_DATA:
- ASSERT_OK(env_->DropUnsyncedFileData());
+ ASSERT_LEVELDB_OK(env_->DropUnsyncedFileData());
break;
case RESET_DELETE_UNSYNCED_FILES:
- ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync());
+ ASSERT_LEVELDB_OK(env_->DeleteFilesCreatedAfterLastDirSync());
break;
default:
assert(false);
@@ -496,10 +496,11 @@ class FaultInjectionTest {
env_->SetFilesystemActive(false);
CloseDB();
ResetDBState(reset_method);
- ASSERT_OK(OpenDB());
- ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
- ASSERT_OK(Verify(num_pre_sync, num_post_sync,
- FaultInjectionTest::VAL_EXPECT_ERROR));
+ ASSERT_LEVELDB_OK(OpenDB());
+ ASSERT_LEVELDB_OK(
+ Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
+ ASSERT_LEVELDB_OK(Verify(num_pre_sync, num_post_sync,
+ FaultInjectionTest::VAL_EXPECT_ERROR));
}
void NoWriteTestPreFault() {}
@@ -507,12 +508,12 @@ class FaultInjectionTest {
void NoWriteTestReopenWithFault(ResetMethod reset_method) {
CloseDB();
ResetDBState(reset_method);
- ASSERT_OK(OpenDB());
+ ASSERT_LEVELDB_OK(OpenDB());
}
void DoTest() {
Random rnd(0);
- ASSERT_OK(OpenDB());
+ ASSERT_LEVELDB_OK(OpenDB());
for (size_t idx = 0; idx < kNumIterations; idx++) {
int num_pre_sync = rnd.Uniform(kMaxNumValues);
int num_post_sync = rnd.Uniform(kMaxNumValues);
@@ -536,16 +537,19 @@ class FaultInjectionTest {
}
};
-TEST(FaultInjectionTest, FaultTestNoLogReuse) {
+TEST_F(FaultInjectionTest, FaultTestNoLogReuse) {
ReuseLogs(false);
DoTest();
}
-TEST(FaultInjectionTest, FaultTestWithLogReuse) {
+TEST_F(FaultInjectionTest, FaultTestWithLogReuse) {
ReuseLogs(true);
DoTest();
}
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/db/filename_test.cc b/db/filename_test.cc
index 952f320..ad0bc73 100644
--- a/db/filename_test.cc
+++ b/db/filename_test.cc
@@ -4,15 +4,13 @@
#include "db/filename.h"
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "db/dbformat.h"
#include "port/port.h"
#include "util/logging.h"
-#include "util/testharness.h"
namespace leveldb {
-class FileNameTest {};
-
TEST(FileNameTest, Parse) {
Slice db;
FileType type;
@@ -128,4 +126,7 @@ TEST(FileNameTest, Construction) {
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/db/log_test.cc b/db/log_test.cc
index 0e31648..680f267 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "leveldb/env.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/random.h"
-#include "util/testharness.h"
namespace leveldb {
namespace log {
@@ -36,7 +36,7 @@ static std::string RandomSkewedString(int i, Random* rnd) {
return BigString(NumberString(i), rnd->Skewed(17));
}
-class LogTest {
+class LogTest : public testing::Test {
public:
LogTest()
: reading_(false),
@@ -177,7 +177,7 @@ class LogTest {
StringSource() : force_error_(false), returned_partial_(false) {}
Status Read(size_t n, Slice* result, char* scratch) override {
- ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
+ EXPECT_TRUE(!returned_partial_) << "must not Read() after eof/error";
if (force_error_) {
force_error_ = false;
@@ -258,9 +258,9 @@ uint64_t LogTest::initial_offset_last_record_offsets_[] = {
int LogTest::num_initial_offset_records_ =
sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
-TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
+TEST_F(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
-TEST(LogTest, ReadWrite) {
+TEST_F(LogTest, ReadWrite) {
Write("foo");
Write("bar");
Write("");
@@ -273,7 +273,7 @@ TEST(LogTest, ReadWrite) {
ASSERT_EQ("EOF", Read()); // Make sure reads at eof work
}
-TEST(LogTest, ManyBlocks) {
+TEST_F(LogTest, ManyBlocks) {
for (int i = 0; i < 100000; i++) {
Write(NumberString(i));
}
@@ -283,7 +283,7 @@ TEST(LogTest, ManyBlocks) {
ASSERT_EQ("EOF", Read());
}
-TEST(LogTest, Fragmentation) {
+TEST_F(LogTest, Fragmentation) {
Write("small");
Write(BigString("medium", 50000));
Write(BigString("large", 100000));
@@ -293,7 +293,7 @@ TEST(LogTest, Fragmentation) {
ASSERT_EQ("EOF", Read());
}
-TEST(LogTest, MarginalTrailer) {
+TEST_F(LogTest, MarginalTrailer) {
// Make a trailer that is exactly the same length as an empty record.
const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n));
@@ -306,7 +306,7 @@ TEST(LogTest, MarginalTrailer) {
ASSERT_EQ("EOF", Read());
}
-TEST(LogTest, MarginalTrailer2) {
+TEST_F(LogTest, MarginalTrailer2) {
// Make a trailer that is exactly the same length as an empty record.
const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n));
@@ -319,7 +319,7 @@ TEST(LogTest, MarginalTrailer2) {
ASSERT_EQ("", ReportMessage());
}
-TEST(LogTest, ShortTrailer) {
+TEST_F(LogTest, ShortTrailer) {
const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
@@ -331,7 +331,7 @@ TEST(LogTest, ShortTrailer) {
ASSERT_EQ("EOF", Read());
}
-TEST(LogTest, AlignedEof) {
+TEST_F(LogTest, AlignedEof) {
const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
@@ -339,7 +339,7 @@ TEST(LogTest, AlignedEof) {
ASSERT_EQ("EOF", Read());
}
-TEST(LogTest, OpenForAppend) {
+TEST_F(LogTest, OpenForAppend) {
Write("hello");
ReopenForAppend();
Write("world");
@@ -348,7 +348,7 @@ TEST(LogTest, OpenForAppend) {
ASSERT_EQ("EOF", Read());
}
-TEST(LogTest, RandomRead) {
+TEST_F(LogTest, RandomRead) {
const int N = 500;
Random write_rnd(301);
for (int i = 0; i < N; i++) {
@@ -363,7 +363,7 @@ TEST(LogTest, RandomRead) {
// Tests of all the error paths in log_reader.cc follow:
-TEST(LogTest, ReadError) {
+TEST_F(LogTest, ReadError) {
Write("foo");
ForceError();
ASSERT_EQ("EOF", Read());
@@ -371,7 +371,7 @@ TEST(LogTest, ReadError) {
ASSERT_EQ("OK", MatchError("read error"));
}
-TEST(LogTest, BadRecordType) {
+TEST_F(LogTest, BadRecordType) {
Write("foo");
// Type is stored in header[6]
IncrementByte(6, 100);
@@ -381,7 +381,7 @@ TEST(LogTest, BadRecordType) {
ASSERT_EQ("OK", MatchError("unknown record type"));
}
-TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
+TEST_F(LogTest, TruncatedTrailingRecordIsIgnored) {
Write("foo");
ShrinkSize(4); // Drop all payload as well as a header byte
ASSERT_EQ("EOF", Read());
@@ -390,7 +390,7 @@ TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
ASSERT_EQ("", ReportMessage());
}
-TEST(LogTest, BadLength) {
+TEST_F(LogTest, BadLength) {
const int kPayloadSize = kBlockSize - kHeaderSize;
Write(BigString("bar", kPayloadSize));
Write("foo");
@@ -401,7 +401,7 @@ TEST(LogTest, BadLength) {
ASSERT_EQ("OK", MatchError("bad record length"));
}
-TEST(LogTest, BadLengthAtEndIsIgnored) {
+TEST_F(LogTest, BadLengthAtEndIsIgnored) {
Write("foo");
ShrinkSize(1);
ASSERT_EQ("EOF", Read());
@@ -409,7 +409,7 @@ TEST(LogTest, BadLengthAtEndIsIgnored) {
ASSERT_EQ("", ReportMessage());
}
-TEST(LogTest, ChecksumMismatch) {
+TEST_F(LogTest, ChecksumMismatch) {
Write("foo");
IncrementByte(0, 10);
ASSERT_EQ("EOF", Read());
@@ -417,7 +417,7 @@ TEST(LogTest, ChecksumMismatch) {
ASSERT_EQ("OK", MatchError("checksum mismatch"));
}
-TEST(LogTest, UnexpectedMiddleType) {
+TEST_F(LogTest, UnexpectedMiddleType) {
Write("foo");
SetByte(6, kMiddleType);
FixChecksum(0, 3);
@@ -426,7 +426,7 @@ TEST(LogTest, UnexpectedMiddleType) {
ASSERT_EQ("OK", MatchError("missing start"));
}
-TEST(LogTest, UnexpectedLastType) {
+TEST_F(LogTest, UnexpectedLastType) {
Write("foo");
SetByte(6, kLastType);
FixChecksum(0, 3);
@@ -435,7 +435,7 @@ TEST(LogTest, UnexpectedLastType) {
ASSERT_EQ("OK", MatchError("missing start"));
}
-TEST(LogTest, UnexpectedFullType) {
+TEST_F(LogTest, UnexpectedFullType) {
Write("foo");
Write("bar");
SetByte(6, kFirstType);
@@ -446,7 +446,7 @@ TEST(LogTest, UnexpectedFullType) {
ASSERT_EQ("OK", MatchError("partial record without end"));
}
-TEST(LogTest, UnexpectedFirstType) {
+TEST_F(LogTest, UnexpectedFirstType) {
Write("foo");
Write(BigString("bar", 100000));
SetByte(6, kFirstType);
@@ -457,7 +457,7 @@ TEST(LogTest, UnexpectedFirstType) {
ASSERT_EQ("OK", MatchError("partial record without end"));
}
-TEST(LogTest, MissingLastIsIgnored) {
+TEST_F(LogTest, MissingLastIsIgnored) {
Write(BigString("bar", kBlockSize));
// Remove the LAST block, including header.
ShrinkSize(14);
@@ -466,7 +466,7 @@ TEST(LogTest, MissingLastIsIgnored) {
ASSERT_EQ(0, DroppedBytes());
}
-TEST(LogTest, PartialLastIsIgnored) {
+TEST_F(LogTest, PartialLastIsIgnored) {
Write(BigString("bar", kBlockSize));
// Cause a bad record length in the LAST block.
ShrinkSize(1);
@@ -475,7 +475,7 @@ TEST(LogTest, PartialLastIsIgnored) {
ASSERT_EQ(0, DroppedBytes());
}
-TEST(LogTest, SkipIntoMultiRecord) {
+TEST_F(LogTest, SkipIntoMultiRecord) {
// Consider a fragmented record:
// first(R1), middle(R1), last(R1), first(R2)
// If initial_offset points to a record after first(R1) but before first(R2)
@@ -491,7 +491,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
ASSERT_EQ("EOF", Read());
}
-TEST(LogTest, ErrorJoinsRecords) {
+TEST_F(LogTest, ErrorJoinsRecords) {
// Consider two fragmented records:
// first(R1) last(R1) first(R2) last(R2)
// where the middle two fragments disappear. We do not want
@@ -514,47 +514,50 @@ TEST(LogTest, ErrorJoinsRecords) {
ASSERT_GE(dropped, 2 * kBlockSize);
}
-TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
+TEST_F(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
-TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
+TEST_F(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
-TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
+TEST_F(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
-TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
+TEST_F(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
-TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
+TEST_F(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
-TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
+TEST_F(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
-TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
+TEST_F(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
-TEST(LogTest, ReadFourthFirstBlockTrailer) {
+TEST_F(LogTest, ReadFourthFirstBlockTrailer) {
CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
}
-TEST(LogTest, ReadFourthMiddleBlock) {
+TEST_F(LogTest, ReadFourthMiddleBlock) {
CheckInitialOffsetRecord(log::kBlockSize + 1, 3);
}
-TEST(LogTest, ReadFourthLastBlock) {
+TEST_F(LogTest, ReadFourthLastBlock) {
CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3);
}
-TEST(LogTest, ReadFourthStart) {
+TEST_F(LogTest, ReadFourthStart) {
CheckInitialOffsetRecord(
2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
3);
}
-TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
+TEST_F(LogTest, ReadInitialOffsetIntoBlockPadding) {
CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
}
-TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
+TEST_F(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
-TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
+TEST_F(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
} // namespace log
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
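The log_test.cc hunks above show the two moves that recur throughout this migration: the fixture now derives from testing::Test so each TEST becomes TEST_F, and the check inside StringSource::Read() switches from ASSERT_TRUE to EXPECT_TRUE because googletest only permits ASSERT_* in functions returning void (Read() returns a Status). A minimal sketch of both patterns, using a hypothetical FooTest fixture rather than the real LogTest:

#include "third_party/googletest/googletest/include/gtest/gtest.h"

#include "leveldb/status.h"

namespace {

// Hypothetical fixture illustrating the TEST -> TEST_F conversion.
class FooTest : public testing::Test {
 public:
  FooTest() : ready_(true) {}

  // Returns a value, so ASSERT_* is not allowed here; EXPECT_* records the
  // failure and lets the helper keep running.
  leveldb::Status CheckedRead() {
    EXPECT_TRUE(ready_) << "must not Read() before setup";
    return leveldb::Status::OK();
  }

 private:
  bool ready_;
};

// TEST_F bodies run as members of the fixture and can use its helpers.
TEST_F(FooTest, ReadsWhenReady) { EXPECT_TRUE(CheckedRead().ok()); }

}  // namespace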
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index 547a959..0657743 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
@@ -10,15 +11,14 @@
#include "leveldb/env.h"
#include "leveldb/write_batch.h"
#include "util/logging.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
-class RecoveryTest {
+class RecoveryTest : public testing::Test {
public:
RecoveryTest() : env_(Env::Default()), db_(nullptr) {
- dbname_ = test::TmpDir() + "/recovery_test";
+ dbname_ = testing::TempDir() + "/recovery_test";
DestroyDB(dbname_, Options());
Open();
}
@@ -63,7 +63,7 @@ class RecoveryTest {
}
void Open(Options* options = nullptr) {
- ASSERT_OK(OpenWithStatus(options));
+ ASSERT_LEVELDB_OK(OpenWithStatus(options));
ASSERT_EQ(1, NumLogs());
}
@@ -84,7 +84,8 @@ class RecoveryTest {
std::string ManifestFileName() {
std::string current;
- ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
+ EXPECT_LEVELDB_OK(
+ ReadFileToString(env_, CurrentFileName(dbname_), &current));
size_t len = current.size();
if (len > 0 && current[len - 1] == '\n') {
current.resize(len - 1);
@@ -100,18 +101,20 @@ class RecoveryTest {
Close();
std::vector<uint64_t> logs = GetFiles(kLogFile);
for (size_t i = 0; i < logs.size(); i++) {
- ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
+ EXPECT_LEVELDB_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
}
return logs.size();
}
- void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
+ void DeleteManifestFile() {
+ ASSERT_LEVELDB_OK(env_->DeleteFile(ManifestFileName()));
+ }
uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
std::vector<uint64_t> GetFiles(FileType t) {
std::vector<std::string> filenames;
- ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+ EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
std::vector<uint64_t> result;
for (size_t i = 0; i < filenames.size(); i++) {
uint64_t number;
@@ -129,7 +132,7 @@ class RecoveryTest {
uint64_t FileSize(const std::string& fname) {
uint64_t result;
- ASSERT_OK(env_->GetFileSize(fname, &result)) << fname;
+ EXPECT_LEVELDB_OK(env_->GetFileSize(fname, &result)) << fname;
return result;
}
@@ -139,13 +142,13 @@ class RecoveryTest {
void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
std::string fname = LogFileName(dbname_, lognum);
WritableFile* file;
- ASSERT_OK(env_->NewWritableFile(fname, &file));
+ ASSERT_LEVELDB_OK(env_->NewWritableFile(fname, &file));
log::Writer writer(file);
WriteBatch batch;
batch.Put(key, val);
WriteBatchInternal::SetSequence(&batch, seq);
- ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
- ASSERT_OK(file->Flush());
+ ASSERT_LEVELDB_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
+ ASSERT_LEVELDB_OK(file->Flush());
delete file;
}
@@ -155,12 +158,12 @@ class RecoveryTest {
DB* db_;
};
-TEST(RecoveryTest, ManifestReused) {
+TEST_F(RecoveryTest, ManifestReused) {
if (!CanAppend()) {
fprintf(stderr, "skipping test because env does not support appending\n");
return;
}
- ASSERT_OK(Put("foo", "bar"));
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
Close();
std::string old_manifest = ManifestFileName();
Open();
@@ -171,12 +174,12 @@ TEST(RecoveryTest, ManifestReused) {
ASSERT_EQ("bar", Get("foo"));
}
-TEST(RecoveryTest, LargeManifestCompacted) {
+TEST_F(RecoveryTest, LargeManifestCompacted) {
if (!CanAppend()) {
fprintf(stderr, "skipping test because env does not support appending\n");
return;
}
- ASSERT_OK(Put("foo", "bar"));
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
Close();
std::string old_manifest = ManifestFileName();
@@ -184,10 +187,10 @@ TEST(RecoveryTest, LargeManifestCompacted) {
{
uint64_t len = FileSize(old_manifest);
WritableFile* file;
- ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
+ ASSERT_LEVELDB_OK(env()->NewAppendableFile(old_manifest, &file));
std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
- ASSERT_OK(file->Append(zeroes));
- ASSERT_OK(file->Flush());
+ ASSERT_LEVELDB_OK(file->Append(zeroes));
+ ASSERT_LEVELDB_OK(file->Flush());
delete file;
}
@@ -202,8 +205,8 @@ TEST(RecoveryTest, LargeManifestCompacted) {
ASSERT_EQ("bar", Get("foo"));
}
-TEST(RecoveryTest, NoLogFiles) {
- ASSERT_OK(Put("foo", "bar"));
+TEST_F(RecoveryTest, NoLogFiles) {
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ(1, DeleteLogFiles());
Open();
ASSERT_EQ("NOT_FOUND", Get("foo"));
@@ -211,13 +214,13 @@ TEST(RecoveryTest, NoLogFiles) {
ASSERT_EQ("NOT_FOUND", Get("foo"));
}
-TEST(RecoveryTest, LogFileReuse) {
+TEST_F(RecoveryTest, LogFileReuse) {
if (!CanAppend()) {
fprintf(stderr, "skipping test because env does not support appending\n");
return;
}
for (int i = 0; i < 2; i++) {
- ASSERT_OK(Put("foo", "bar"));
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
if (i == 0) {
// Compact to ensure current log is empty
CompactMemTable();
@@ -241,13 +244,13 @@ TEST(RecoveryTest, LogFileReuse) {
}
}
-TEST(RecoveryTest, MultipleMemTables) {
+TEST_F(RecoveryTest, MultipleMemTables) {
// Make a large log.
const int kNum = 1000;
for (int i = 0; i < kNum; i++) {
char buf[100];
snprintf(buf, sizeof(buf), "%050d", i);
- ASSERT_OK(Put(buf, buf));
+ ASSERT_LEVELDB_OK(Put(buf, buf));
}
ASSERT_EQ(0, NumTables());
Close();
@@ -270,8 +273,8 @@ TEST(RecoveryTest, MultipleMemTables) {
}
}
-TEST(RecoveryTest, MultipleLogFiles) {
- ASSERT_OK(Put("foo", "bar"));
+TEST_F(RecoveryTest, MultipleLogFiles) {
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
Close();
ASSERT_EQ(1, NumLogs());
@@ -316,8 +319,8 @@ TEST(RecoveryTest, MultipleLogFiles) {
ASSERT_EQ("there", Get("hi"));
}
-TEST(RecoveryTest, ManifestMissing) {
- ASSERT_OK(Put("foo", "bar"));
+TEST_F(RecoveryTest, ManifestMissing) {
+ ASSERT_LEVELDB_OK(Put("foo", "bar"));
Close();
DeleteManifestFile();
@@ -327,4 +330,7 @@ TEST(RecoveryTest, ManifestMissing) {
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
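recovery_test.cc follows the same split: fatal ASSERT_LEVELDB_OK in void helpers such as Open() and MakeLogFile(), non-fatal EXPECT_LEVELDB_OK in value-returning helpers such as ManifestFileName(), GetFiles() and FileSize(). One plausible shape for these status macros, built on gmock matchers; the real definitions live in util/testutil.h and may differ:

// Sketch only: assumes gmock is available alongside gtest; the shipped
// macros in util/testutil.h may be defined differently.
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include "leveldb/status.h"

namespace leveldb {
namespace test {

// Matches any value whose ok() method returns true (e.g. leveldb::Status).
MATCHER(IsOK, "") { return arg.ok(); }

}  // namespace test
}  // namespace leveldb

#define ASSERT_LEVELDB_OK(expression) \
  ASSERT_THAT(expression, leveldb::test::IsOK())
#define EXPECT_LEVELDB_OK(expression) \
  EXPECT_THAT(expression, leveldb::test::IsOK())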
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 9fa2d96..04b9fa7 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -7,13 +7,14 @@
#include <atomic>
#include <set>
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/arena.h"
#include "util/hash.h"
#include "util/random.h"
-#include "util/testharness.h"
+#include "util/testutil.h"
namespace leveldb {
@@ -31,8 +32,6 @@ struct Comparator {
}
};
-class SkipTest {};
-
TEST(SkipTest, Empty) {
Arena arena;
Comparator cmp;
@@ -366,4 +365,7 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
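skiplist_test.cc is the simpler case: its tests need no per-test state, so they stay plain TEST() and the empty class SkipTest {}; that the old harness required as a name anchor is deleted outright; with googletest the first TEST() argument is only a suite name. A short sketch of the TEST / TEST_F distinction (both suite names here are made up):

#include "gtest/gtest.h"

// Plain TEST(): the first argument is just a suite name, no class required.
TEST(SuiteNameOnlyTest, TwoPlusTwo) { EXPECT_EQ(4, 2 + 2); }

// TEST_F(): the first argument must be a fixture derived from testing::Test,
// and the body can touch its protected members.
class FixtureSketchTest : public testing::Test {
 protected:
  int answer_ = 4;
};

TEST_F(FixtureSketchTest, SeesFixtureMembers) { EXPECT_EQ(4, answer_); }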
diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc
index 0b7cda8..228fa3b 100644
--- a/db/version_edit_test.cc
+++ b/db/version_edit_test.cc
@@ -3,7 +3,8 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/version_edit.h"
-#include "util/testharness.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
namespace leveldb {
@@ -17,8 +18,6 @@ static void TestEncodeDecode(const VersionEdit& edit) {
ASSERT_EQ(encoded, encoded2);
}
-class VersionEditTest {};
-
TEST(VersionEditTest, EncodeDecode) {
static const uint64_t kBig = 1ull << 50;
@@ -41,4 +40,7 @@ TEST(VersionEditTest, EncodeDecode) {
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
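version_edit_test.cc picks up the same boilerplate as every other file in this change: the header under test stays first, the gtest include replaces util/testharness.h, and the old leveldb::test::RunAllTests() entry point becomes an explicit googletest main(). That main() is worth reading once, since it is now identical across all of these test binaries:

#include "gtest/gtest.h"

// InitGoogleTest() consumes the --gtest_* flags (for example
// --gtest_filter=VersionEditTest.EncodeDecode to run a single case) and
// RUN_ALL_TESTS() returns a nonzero exit code if any registered test fails.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}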
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index c1056a1..71b19a7 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -3,13 +3,14 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/version_set.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "util/logging.h"
-#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
-class FindFileTest {
+class FindFileTest : public testing::Test {
public:
FindFileTest() : disjoint_sorted_files_(true) {}
@@ -50,7 +51,7 @@ class FindFileTest {
std::vector<FileMetaData*> files_;
};
-TEST(FindFileTest, Empty) {
+TEST_F(FindFileTest, Empty) {
ASSERT_EQ(0, Find("foo"));
ASSERT_TRUE(!Overlaps("a", "z"));
ASSERT_TRUE(!Overlaps(nullptr, "z"));
@@ -58,7 +59,7 @@ TEST(FindFileTest, Empty) {
ASSERT_TRUE(!Overlaps(nullptr, nullptr));
}
-TEST(FindFileTest, Single) {
+TEST_F(FindFileTest, Single) {
Add("p", "q");
ASSERT_EQ(0, Find("a"));
ASSERT_EQ(0, Find("p"));
@@ -88,7 +89,7 @@ TEST(FindFileTest, Single) {
ASSERT_TRUE(Overlaps(nullptr, nullptr));
}
-TEST(FindFileTest, Multiple) {
+TEST_F(FindFileTest, Multiple) {
Add("150", "200");
Add("200", "250");
Add("300", "350");
@@ -126,7 +127,7 @@ TEST(FindFileTest, Multiple) {
ASSERT_TRUE(Overlaps("450", "500"));
}
-TEST(FindFileTest, MultipleNullBoundaries) {
+TEST_F(FindFileTest, MultipleNullBoundaries) {
Add("150", "200");
Add("200", "250");
Add("300", "350");
@@ -146,7 +147,7 @@ TEST(FindFileTest, MultipleNullBoundaries) {
ASSERT_TRUE(Overlaps("450", nullptr));
}
-TEST(FindFileTest, OverlapSequenceChecks) {
+TEST_F(FindFileTest, OverlapSequenceChecks) {
Add("200", "200", 5000, 3000);
ASSERT_TRUE(!Overlaps("199", "199"));
ASSERT_TRUE(!Overlaps("201", "300"));
@@ -155,7 +156,7 @@ TEST(FindFileTest, OverlapSequenceChecks) {
ASSERT_TRUE(Overlaps("200", "210"));
}
-TEST(FindFileTest, OverlappingFiles) {
+TEST_F(FindFileTest, OverlappingFiles) {
Add("150", "600");
Add("400", "500");
disjoint_sorted_files_ = false;
@@ -177,7 +178,7 @@ void AddBoundaryInputs(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>& level_files,
std::vector<FileMetaData*>* compaction_files);
-class AddBoundaryInputsTest {
+class AddBoundaryInputsTest : public testing::Test {
public:
std::vector<FileMetaData*> level_files_;
std::vector<FileMetaData*> compaction_files_;
@@ -204,13 +205,13 @@ class AddBoundaryInputsTest {
}
};
-TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
+TEST_F(AddBoundaryInputsTest, TestEmptyFileSets) {
AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_TRUE(compaction_files_.empty());
ASSERT_TRUE(level_files_.empty());
}
-TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
+TEST_F(AddBoundaryInputsTest, TestEmptyLevelFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue)));
@@ -222,7 +223,7 @@ TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
ASSERT_TRUE(level_files_.empty());
}
-TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
+TEST_F(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue)));
@@ -234,7 +235,7 @@ TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
ASSERT_EQ(f1, level_files_[0]);
}
-TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
+TEST_F(AddBoundaryInputsTest, TestNoBoundaryFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue)));
@@ -255,7 +256,7 @@ TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
ASSERT_EQ(2, compaction_files_.size());
}
-TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
+TEST_F(AddBoundaryInputsTest, TestOneBoundaryFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
InternalKey(InternalKey("100", 2, kTypeValue)));
@@ -277,7 +278,7 @@ TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
ASSERT_EQ(f2, compaction_files_[1]);
}
-TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
+TEST_F(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
InternalKey(InternalKey("100", 5, kTypeValue)));
@@ -300,7 +301,7 @@ TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
ASSERT_EQ(f2, compaction_files_[2]);
}
-TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
+TEST_F(AddBoundaryInputsTest, TestDisjoinFilePointers) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
InternalKey(InternalKey("100", 5, kTypeValue)));
@@ -329,4 +330,7 @@ TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
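version_set_test.cc converts two fixtures, FindFileTest and AddBoundaryInputsTest. One behavioural detail that carries over cleanly: googletest constructs a fresh fixture object for every TEST_F, so members like level_files_ and compaction_files_ start empty in each test rather than accumulating across tests. A hypothetical fixture demonstrating that per-test lifetime:

#include <vector>

#include "gtest/gtest.h"

class PerTestLifetimeTest : public testing::Test {
 protected:
  std::vector<int> values_;
};

TEST_F(PerTestLifetimeTest, StartsEmpty) {
  EXPECT_TRUE(values_.empty());
  values_.push_back(1);  // Mutation is local to this test's fixture instance.
}

TEST_F(PerTestLifetimeTest, NextTestGetsAFreshFixture) {
  EXPECT_TRUE(values_.empty());  // A new PerTestLifetimeTest was constructed.
}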
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index c32317f..b33993a 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/db.h"
-
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
+#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/logging.h"
-#include "util/testharness.h"
namespace leveldb {
@@ -22,7 +21,7 @@ static std::string PrintContents(WriteBatch* b) {
Iterator* iter = mem->NewIterator();
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ParsedInternalKey ikey;
- ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey));
+ EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey));
switch (ikey.type) {
case kTypeValue:
state.append("Put(");
@@ -52,8 +51,6 @@ static std::string PrintContents(WriteBatch* b) {
return state;
}
-class WriteBatchTest {};
-
TEST(WriteBatchTest, Empty) {
WriteBatch batch;
ASSERT_EQ("", PrintContents(&batch));
@@ -134,4 +131,7 @@ TEST(WriteBatchTest, ApproximateSize) {
} // namespace leveldb
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
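In write_batch_test.cc the value-returning helper PrintContents() hits the same restriction: ASSERT_TRUE cannot be used in a non-void function, so the ParseInternalKey() check becomes EXPECT_TRUE, which records the failure but does not abort the helper. When such a helper should stop after a failed check, it has to return explicitly; a hedged sketch of that pattern with a made-up helper:

#include <string>

#include "gtest/gtest.h"

// Hypothetical stand-in for a value-returning helper like PrintContents():
// EXPECT_* marks the test as failed, and the explicit early return keeps the
// helper from operating on data it could not parse.
std::string DescribeKey(bool parsed, const std::string& key) {
  EXPECT_TRUE(parsed) << "could not parse internal key";
  if (!parsed) return "(unparsable)";
  return "Put(" + key + ")";
}

TEST(DescribeKeySketchTest, FormatsParsedKeys) {
  EXPECT_EQ("Put(foo)", DescribeKey(true, "foo"));
}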