diff options
author | Andras Becsi <andras.becsi@digia.com> | 2014-03-18 13:16:26 +0100 |
---|---|---|
committer | Frederik Gladhorn <frederik.gladhorn@digia.com> | 2014-03-20 15:55:39 +0100 |
commit | 3f0f86b0caed75241fa71c95a5d73bc0164348c5 (patch) | |
tree | 92b9fb00f2e9e90b0be2262093876d4f43b6cd13 /chromium/third_party/leveldatabase | |
parent | e90d7c4b152c56919d963987e2503f9909a666d2 (diff) | |
download | qtwebengine-chromium-3f0f86b0caed75241fa71c95a5d73bc0164348c5.tar.gz |
Update to new stable branch 1750
This also includes an updated ninja and updated chromium dependencies
needed on Windows.
Change-Id: Icd597d80ed3fa4425933c9f1334c3c2e31291c42
Reviewed-by: Zoltan Arvai <zarvai@inf.u-szeged.hu>
Reviewed-by: Zeno Albisser <zeno.albisser@digia.com>
Diffstat (limited to 'chromium/third_party/leveldatabase')
27 files changed, 794 insertions, 417 deletions
diff --git a/chromium/third_party/leveldatabase/README.chromium b/chromium/third_party/leveldatabase/README.chromium index 32670a7c107..92e6ba99470 100644 --- a/chromium/third_party/leveldatabase/README.chromium +++ b/chromium/third_party/leveldatabase/README.chromium @@ -1,7 +1,7 @@ Name: LevelDB: A Fast Persistent Key-Value Store Short Name: leveldb URL: http://code.google.com/p/leveldb/ -Version: r77 +Version: r78 License: New BSD License File: src/LICENSE Security Critical: yes @@ -22,3 +22,4 @@ Local Additions: * ChromiumEnv wraps low-level I/O calls that may be interrupted with a HANDLE_EINTR macro that retries the call. * TRACE macros/thread name for chrome://tracing diagnostics +* Handle in-process exclusive file locks, based on src/util/env_posix.cc diff --git a/chromium/third_party/leveldatabase/env_chromium.cc b/chromium/third_party/leveldatabase/env_chromium.cc index 9cba7291eac..b5a866c2918 100644 --- a/chromium/third_party/leveldatabase/env_chromium.cc +++ b/chromium/third_party/leveldatabase/env_chromium.cc @@ -4,6 +4,7 @@ #include <errno.h> #include <stdio.h> +#include <string.h> #include <deque> @@ -37,6 +38,7 @@ #endif #if defined(OS_POSIX) +#include <dirent.h> #include <fcntl.h> #include <sys/resource.h> #include <sys/time.h> @@ -48,6 +50,10 @@ namespace leveldb_env { namespace { +const base::FilePath::CharType backup_table_extension[] = + FILE_PATH_LITERAL(".bak"); +const base::FilePath::CharType table_extension[] = FILE_PATH_LITERAL(".ldb"); + #if (defined(OS_POSIX) && !defined(OS_LINUX)) || defined(OS_WIN) // The following are glibc-specific @@ -207,6 +213,7 @@ class ChromiumRandomAccessFile: public RandomAccessFile { class ChromiumFileLock : public FileLock { public: ::base::PlatformFile file_; + std::string name_; }; class Retrier { @@ -255,7 +262,10 @@ class Retrier { class IDBEnv : public ChromiumEnv { public: - IDBEnv() : ChromiumEnv() { name_ = "LevelDBEnv.IDB"; } + IDBEnv() : ChromiumEnv() { + name_ = "LevelDBEnv.IDB"; + make_backup_ = 
true; + } }; ::base::LazyInstance<IDBEnv>::Leaky idb_env = LAZY_INSTANCE_INITIALIZER; @@ -307,6 +317,8 @@ const char* MethodIDToString(MethodID method) { return "NewLogger"; case kSyncParent: return "SyncParent"; + case kGetChildren: + return "GetChildren"; case kNumEntries: NOTREACHED(); return "kNumEntries"; @@ -379,7 +391,74 @@ ErrorParsingResult ParseMethodAndError(const char* string, return NONE; } -bool IndicatesDiskFull(leveldb::Status status) { +// Keep in sync with LevelDBCorruptionTypes in histograms.xml. Also, don't +// change the order because indices into this array have been recorded in uma +// histograms. +const char* patterns[] = { + "missing files", + "log record too small", + "corrupted internal key", + "partial record", + "missing start of fragmented record", + "error in middle of record", + "unknown record type", + "truncated record at end", + "bad record length", + "VersionEdit", + "FileReader invoked with unexpected value", + "corrupted key", + "CURRENT file does not end with newline", + "no meta-nextfile entry", + "no meta-lognumber entry", + "no last-sequence-number entry", + "malformed WriteBatch", + "bad WriteBatch Put", + "bad WriteBatch Delete", + "unknown WriteBatch tag", + "WriteBatch has wrong count", + "bad entry in block", + "bad block contents", + "bad block handle", + "truncated block read", + "block checksum mismatch", + "checksum mismatch", + "corrupted compressed block contents", + "bad block type", + "bad magic number", + "file is too short", +}; + +// Returns 1-based index into the above array or 0 if nothing matches. 
+int GetCorruptionCode(const leveldb::Status& status) { + DCHECK(!status.IsIOError()); + DCHECK(!status.ok()); + const int kOtherError = 0; + int error = kOtherError; + const std::string& str_error = status.ToString(); + const size_t kNumPatterns = arraysize(patterns); + for (size_t i = 0; i < kNumPatterns; ++i) { + if (str_error.find(patterns[i]) != std::string::npos) { + error = i + 1; + break; + } + } + return error; +} + +int GetNumCorruptionCodes() { + // + 1 for the "other" error that is returned when a corruption message + // doesn't match any of the patterns. + return arraysize(patterns) + 1; +} + +std::string GetCorruptionMessage(const leveldb::Status& status) { + int code = GetCorruptionCode(status); + if (code == 0) + return "Unknown corruption"; + return patterns[code - 1]; +} + +bool IndicatesDiskFull(const leveldb::Status& status) { if (status.ok()) return false; leveldb_env::MethodID method; @@ -392,6 +471,20 @@ bool IndicatesDiskFull(leveldb::Status status) { (result == leveldb_env::METHOD_AND_ERRNO && error == ENOSPC); } +bool IsIOError(const leveldb::Status& status) { + leveldb_env::MethodID method; + int error = -1; + leveldb_env::ErrorParsingResult result = leveldb_env::ParseMethodAndError( + status.ToString().c_str(), &method, &error); + return result != leveldb_env::NONE; +} + +bool IsCorruption(const leveldb::Status& status) { + // LevelDB returns InvalidArgument when an sst file is truncated but there is + // no IsInvalidArgument() accessor defined. 
+ return status.IsCorruption() || (!status.ok() && !IsIOError(status)); +} + std::string FilePathToString(const base::FilePath& file_path) { #if defined(OS_WIN) return UTF16ToUTF8(file_path.value()); @@ -403,13 +496,20 @@ std::string FilePathToString(const base::FilePath& file_path) { ChromiumWritableFile::ChromiumWritableFile(const std::string& fname, FILE* f, const UMALogger* uma_logger, - WriteTracker* tracker) - : filename_(fname), file_(f), uma_logger_(uma_logger), tracker_(tracker) { + WriteTracker* tracker, + bool make_backup) + : filename_(fname), + file_(f), + uma_logger_(uma_logger), + tracker_(tracker), + file_type_(kOther), + make_backup_(make_backup) { base::FilePath path = base::FilePath::FromUTF8Unsafe(fname); - is_manifest_ = - FilePathToString(path.BaseName()).find("MANIFEST") != - std::string::npos; - if (!is_manifest_) + if (FilePathToString(path.BaseName()).find("MANIFEST") == 0) + file_type_ = kManifest; + else if (path.MatchesExtension(table_extension)) + file_type_ = kTable; + if (file_type_ != kManifest) tracker_->DidCreateNewFile(filename_); parent_dir_ = FilePathToString(CreateFilePath(fname).DirName()); } @@ -438,13 +538,13 @@ Status ChromiumWritableFile::SyncParent() { s = MakeIOError( parent_dir_, strerror(saved_errno), kSyncParent, saved_errno); }; - HANDLE_EINTR(close(parent_fd)); + close(parent_fd); #endif return s; } Status ChromiumWritableFile::Append(const Slice& data) { - if (is_manifest_ && tracker_->DoesDirNeedSync(filename_)) { + if (file_type_ == kManifest && tracker_->DoesDirNeedSync(filename_)) { Status s = SyncParent(); if (!s.ok()) return s; @@ -482,6 +582,13 @@ Status ChromiumWritableFile::Flush() { return result; } +static bool MakeBackup(const std::string& fname) { + base::FilePath original_table_name = CreateFilePath(fname); + base::FilePath backup_table_name = + original_table_name.ReplaceExtension(backup_table_extension); + return base::CopyFile(original_table_name, backup_table_name); +} + Status 
ChromiumWritableFile::Sync() { TRACE_EVENT0("leveldb", "ChromiumEnv::Sync"); Status result; @@ -497,12 +604,16 @@ Status ChromiumWritableFile::Sync() { if (error) { result = MakeIOError(filename_, strerror(error), kWritableFileSync, error); uma_logger_->RecordErrorAt(kWritableFileSync); + } else if (make_backup_ && file_type_ == kTable) { + bool success = MakeBackup(filename_); + uma_logger_->RecordBackupResult(success); } return result; } ChromiumEnv::ChromiumEnv() : name_("LevelDBEnv"), + make_backup_(false), bgsignal_(&mu_), started_bgthread_(false), kMaxRetryTimeMillis(1000) { @@ -572,7 +683,7 @@ Status ChromiumEnv::NewWritableFile(const std::string& fname, return MakeIOError( fname, strerror(saved_errno), kNewWritableFile, saved_errno); } else { - *result = new ChromiumWritableFile(fname, f, this, this); + *result = new ChromiumWritableFile(fname, f, this, this, make_backup_); return Status::OK(); } } @@ -581,29 +692,149 @@ bool ChromiumEnv::FileExists(const std::string& fname) { return ::base::PathExists(CreateFilePath(fname)); } -Status ChromiumEnv::GetChildren(const std::string& dir, +base::FilePath ChromiumEnv::RestoreFromBackup(const base::FilePath& base_name) { + base::FilePath table_name = + base_name.AddExtension(table_extension); + bool result = base::CopyFile(base_name.AddExtension(backup_table_extension), + table_name); + std::string uma_name(name_); + uma_name.append(".TableRestore"); + base::BooleanHistogram::FactoryGet( + uma_name, base::Histogram::kUmaTargetedHistogramFlag)->AddBoolean(result); + return table_name; +} + +void ChromiumEnv::RestoreIfNecessary(const std::string& dir, + std::vector<std::string>* result) { + std::set<base::FilePath> tables_found; + std::set<base::FilePath> backups_found; + for (std::vector<std::string>::iterator it = result->begin(); + it != result->end(); + ++it) { + base::FilePath current = CreateFilePath(*it); + if (current.MatchesExtension(table_extension)) + tables_found.insert(current.RemoveExtension()); + if 
(current.MatchesExtension(backup_table_extension)) + backups_found.insert(current.RemoveExtension()); + } + std::set<base::FilePath> backups_only; + std::set_difference(backups_found.begin(), + backups_found.end(), + tables_found.begin(), + tables_found.end(), + std::inserter(backups_only, backups_only.begin())); + if (backups_only.size()) { + std::string uma_name(name_); + uma_name.append(".MissingFiles"); + int num_missing_files = + backups_only.size() > INT_MAX ? INT_MAX : backups_only.size(); + base::Histogram::FactoryGet(uma_name, + 1 /*min*/, + 100 /*max*/, + 8 /*num_buckets*/, + base::Histogram::kUmaTargetedHistogramFlag) + ->Add(num_missing_files); + } + base::FilePath dir_filepath = base::FilePath::FromUTF8Unsafe(dir); + for (std::set<base::FilePath>::iterator it = backups_only.begin(); + it != backups_only.end(); + ++it) { + base::FilePath restored_table_name = + RestoreFromBackup(dir_filepath.Append(*it)); + result->push_back(FilePathToString(restored_table_name.BaseName())); + } +} + +namespace { +#if defined(OS_WIN) +static base::PlatformFileError GetDirectoryEntries( + const base::FilePath& dir_param, + std::vector<base::FilePath>* result) { + result->clear(); + base::FilePath dir_filepath = dir_param.Append(FILE_PATH_LITERAL("*")); + WIN32_FIND_DATA find_data; + HANDLE find_handle = FindFirstFile(dir_filepath.value().c_str(), &find_data); + if (find_handle == INVALID_HANDLE_VALUE) { + DWORD last_error = GetLastError(); + if (last_error == ERROR_FILE_NOT_FOUND) + return base::PLATFORM_FILE_OK; + return base::LastErrorToPlatformFileError(last_error); + } + do { + base::FilePath filepath(find_data.cFileName); + base::FilePath::StringType basename = filepath.BaseName().value(); + if (basename == FILE_PATH_LITERAL(".") || + basename == FILE_PATH_LITERAL("..")) + continue; + result->push_back(filepath.BaseName()); + } while (FindNextFile(find_handle, &find_data)); + DWORD last_error = GetLastError(); + base::PlatformFileError return_value = 
base::PLATFORM_FILE_OK; + if (last_error != ERROR_NO_MORE_FILES) + return_value = base::LastErrorToPlatformFileError(last_error); + FindClose(find_handle); + return return_value; +} +#else +static base::PlatformFileError GetDirectoryEntries( + const base::FilePath& dir_filepath, + std::vector<base::FilePath>* result) { + const std::string dir_string = FilePathToString(dir_filepath); + result->clear(); + DIR* dir = opendir(dir_string.c_str()); + if (!dir) + return base::ErrnoToPlatformFileError(errno); + struct dirent dent_buf; + struct dirent* dent; + int readdir_result; + while ((readdir_result = readdir_r(dir, &dent_buf, &dent)) == 0 && dent) { + if (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0) + continue; + result->push_back(CreateFilePath(dent->d_name)); + } + int saved_errno = errno; + closedir(dir); + if (readdir_result != 0) + return base::ErrnoToPlatformFileError(saved_errno); + return base::PLATFORM_FILE_OK; +} +#endif +} + +Status ChromiumEnv::GetChildren(const std::string& dir_string, std::vector<std::string>* result) { + std::vector<base::FilePath> entries; + base::PlatformFileError error = + GetDirectoryEntries(CreateFilePath(dir_string), &entries); + if (error != base::PLATFORM_FILE_OK) { + RecordOSError(kGetChildren, error); + return MakeIOError( + dir_string, "Could not open/read directory", kGetChildren, error); + } result->clear(); - base::FileEnumerator iter( - CreateFilePath(dir), false, base::FileEnumerator::FILES); - base::FilePath current = iter.Next(); - while (!current.empty()) { - result->push_back(FilePathToString(current.BaseName())); - current = iter.Next(); - } - // TODO(jorlow): Unfortunately, the FileEnumerator swallows errors, so - // we'll always return OK. Maybe manually check for error - // conditions like the file not existing? 
+ for (std::vector<base::FilePath>::iterator it = entries.begin(); + it != entries.end(); + ++it) { + result->push_back(FilePathToString(*it)); + } + + if (make_backup_) + RestoreIfNecessary(dir_string, result); return Status::OK(); } Status ChromiumEnv::DeleteFile(const std::string& fname) { Status result; + base::FilePath fname_filepath = CreateFilePath(fname); // TODO(jorlow): Should we assert this is a file? - if (!::base::DeleteFile(CreateFilePath(fname), false)) { + if (!::base::DeleteFile(fname_filepath, false)) { result = MakeIOError(fname, "Could not delete file.", kDeleteFile); RecordErrorAt(kDeleteFile); } + if (make_backup_ && fname_filepath.MatchesExtension(table_extension)) { + base::DeleteFile(fname_filepath.ReplaceExtension(backup_table_extension), + false); + } return result; } @@ -612,7 +843,7 @@ Status ChromiumEnv::CreateDir(const std::string& name) { base::PlatformFileError error = base::PLATFORM_FILE_OK; Retrier retrier(kCreateDir, this); do { - if (::file_util::CreateDirectoryAndGetError(CreateFilePath(name), &error)) + if (base::CreateDirectoryAndGetError(CreateFilePath(name), &error)) return result; } while (retrier.ShouldKeepTrying(error)); result = MakeIOError(name, "Could not create directory.", kCreateDir, error); @@ -633,7 +864,7 @@ Status ChromiumEnv::DeleteDir(const std::string& name) { Status ChromiumEnv::GetFileSize(const std::string& fname, uint64_t* size) { Status s; int64_t signed_size; - if (!::file_util::GetFileSize(CreateFilePath(fname), &signed_size)) { + if (!::base::GetFileSize(CreateFilePath(fname), &signed_size)) { *size = 0; s = MakeIOError(fname, "Could not determine file size.", kGetFileSize); RecordErrorAt(kGetFileSize); @@ -672,9 +903,7 @@ Status ChromiumEnv::LockFile(const std::string& fname, FileLock** lock) { Status result; int flags = ::base::PLATFORM_FILE_OPEN_ALWAYS | ::base::PLATFORM_FILE_READ | - ::base::PLATFORM_FILE_WRITE | - ::base::PLATFORM_FILE_EXCLUSIVE_READ | - ::base::PLATFORM_FILE_EXCLUSIVE_WRITE; + 
::base::PLATFORM_FILE_WRITE; bool created; ::base::PlatformFileError error_code; ::base::PlatformFile file; @@ -703,21 +932,55 @@ Status ChromiumEnv::LockFile(const std::string& fname, FileLock** lock) { result = MakeIOError( fname, PlatformFileErrorString(error_code), kLockFile, error_code); RecordOSError(kLockFile, error_code); - } else { - ChromiumFileLock* my_lock = new ChromiumFileLock; - my_lock->file_ = file; - *lock = my_lock; + return result; + } + + if (!locks_.Insert(fname)) { + result = MakeIOError(fname, "Lock file already locked.", kLockFile); + ::base::ClosePlatformFile(file); + return result; } + + Retrier lock_retrier = Retrier(kLockFile, this); + do { + error_code = ::base::LockPlatformFile(file); + } while (error_code != ::base::PLATFORM_FILE_OK && + retrier.ShouldKeepTrying(error_code)); + + if (error_code != ::base::PLATFORM_FILE_OK) { + ::base::ClosePlatformFile(file); + locks_.Remove(fname); + result = MakeIOError( + fname, PlatformFileErrorString(error_code), kLockFile, error_code); + RecordOSError(kLockFile, error_code); + return result; + } + + ChromiumFileLock* my_lock = new ChromiumFileLock; + my_lock->file_ = file; + my_lock->name_ = fname; + *lock = my_lock; return result; } Status ChromiumEnv::UnlockFile(FileLock* lock) { ChromiumFileLock* my_lock = reinterpret_cast<ChromiumFileLock*>(lock); Status result; - if (!::base::ClosePlatformFile(my_lock->file_)) { - result = MakeIOError("Could not close lock file.", "", kUnlockFile); + + ::base::PlatformFileError error_code = + ::base::UnlockPlatformFile(my_lock->file_); + if (error_code != ::base::PLATFORM_FILE_OK) { + result = + MakeIOError(my_lock->name_, "Could not unlock lock file.", kUnlockFile); + RecordOSError(kUnlockFile, error_code); + ::base::ClosePlatformFile(my_lock->file_); + } else if (!::base::ClosePlatformFile(my_lock->file_)) { + result = + MakeIOError(my_lock->name_, "Could not close lock file.", kUnlockFile); RecordErrorAt(kUnlockFile); } + bool removed = 
locks_.Remove(my_lock->name_); + DCHECK(removed); delete my_lock; return result; } @@ -725,8 +988,8 @@ Status ChromiumEnv::UnlockFile(FileLock* lock) { Status ChromiumEnv::GetTestDirectory(std::string* path) { mu_.Acquire(); if (test_directory_.empty()) { - if (!::file_util::CreateNewTempDirectory(kLevelDBTestDirectoryPrefix, - &test_directory_)) { + if (!base::CreateNewTempDirectory(kLevelDBTestDirectoryPrefix, + &test_directory_)) { mu_.Release(); RecordErrorAt(kGetTestDirectory); return MakeIOError( @@ -781,6 +1044,13 @@ void ChromiumEnv::RecordOSError(MethodID method, int error) const { GetOSErrorHistogram(method, ERANGE + 1)->Add(error); } +void ChromiumEnv::RecordBackupResult(bool result) const { + std::string uma_name(name_); + uma_name.append(".TableBackup"); + base::BooleanHistogram::FactoryGet( + uma_name, base::Histogram::kUmaTargetedHistogramFlag)->AddBoolean(result); +} + base::HistogramBase* ChromiumEnv::GetOSErrorHistogram(MethodID method, int limit) const { std::string uma_name(name_); diff --git a/chromium/third_party/leveldatabase/env_chromium.h b/chromium/third_party/leveldatabase/env_chromium.h index e84fad78285..d6f9c2451b4 100644 --- a/chromium/third_party/leveldatabase/env_chromium.h +++ b/chromium/third_party/leveldatabase/env_chromium.h @@ -7,6 +7,7 @@ #include <deque> #include <map> +#include <set> #include "base/metrics/histogram.h" #include "base/platform_file.h" @@ -14,6 +15,8 @@ #include "leveldb/env.h" #include "leveldb/slice.h" #include "leveldb/status.h" +#include "port/port_chromium.h" +#include "util/mutexlock.h" namespace leveldb_env { @@ -38,6 +41,7 @@ enum MethodID { kGetTestDirectory, kNewLogger, kSyncParent, + kGetChildren, kNumEntries }; @@ -65,7 +69,12 @@ enum ErrorParsingResult { ErrorParsingResult ParseMethodAndError(const char* string, MethodID* method, int* error); -bool IndicatesDiskFull(leveldb::Status status); +int GetCorruptionCode(const leveldb::Status& status); +int GetNumCorruptionCodes(); +std::string 
GetCorruptionMessage(const leveldb::Status& status); +bool IndicatesDiskFull(const leveldb::Status& status); +bool IsIOError(const leveldb::Status& status); +bool IsCorruption(const leveldb::Status& status); std::string FilePathToString(const base::FilePath& file_path); class UMALogger { @@ -74,6 +83,7 @@ class UMALogger { virtual void RecordOSError(MethodID method, int saved_errno) const = 0; virtual void RecordOSError(MethodID method, base::PlatformFileError error) const = 0; + virtual void RecordBackupResult(bool success) const = 0; }; class RetrierProvider { @@ -96,7 +106,8 @@ class ChromiumWritableFile : public leveldb::WritableFile { ChromiumWritableFile(const std::string& fname, FILE* f, const UMALogger* uma_logger, - WriteTracker* tracker); + WriteTracker* tracker, + bool make_backup); virtual ~ChromiumWritableFile(); virtual leveldb::Status Append(const leveldb::Slice& data); virtual leveldb::Status Close(); @@ -104,14 +115,20 @@ class ChromiumWritableFile : public leveldb::WritableFile { virtual leveldb::Status Sync(); private: + enum Type { + kManifest, + kTable, + kOther + }; leveldb::Status SyncParent(); std::string filename_; FILE* file_; const UMALogger* uma_logger_; WriteTracker* tracker_; - bool is_manifest_; + Type file_type_; std::string parent_dir_; + bool make_backup_; }; class ChromiumEnv : public leveldb::Env, @@ -155,8 +172,26 @@ class ChromiumEnv : public leveldb::Env, virtual void DidSyncDir(const std::string& fname); std::string name_; + bool make_backup_; private: + // File locks may not be exclusive within a process (e.g. on POSIX). Track + // locks held by the ChromiumEnv to prevent access within the process. 
+ class LockTable { + public: + bool Insert(const std::string& fname) { + leveldb::MutexLock l(&mu_); + return locked_files_.insert(fname).second; + } + bool Remove(const std::string& fname) { + leveldb::MutexLock l(&mu_); + return locked_files_.erase(fname) == 1; + } + private: + leveldb::port::Mutex mu_; + std::set<std::string> locked_files_; + }; + std::map<std::string, bool> needs_sync_map_; base::Lock map_lock_; @@ -171,6 +206,10 @@ class ChromiumEnv : public leveldb::Env, virtual void RecordOSError(MethodID method, int saved_errno) const; virtual void RecordOSError(MethodID method, base::PlatformFileError error) const; + virtual void RecordBackupResult(bool result) const; + void RestoreIfNecessary(const std::string& dir, + std::vector<std::string>* children); + base::FilePath RestoreFromBackup(const base::FilePath& base_name); void RecordOpenFilesLimit(const std::string& type); void RecordLockFileAncestors(int num_missing_ancestors) const; base::HistogramBase* GetOSErrorHistogram(MethodID method, int limit) const; @@ -197,6 +236,7 @@ class ChromiumEnv : public leveldb::Env, }; typedef std::deque<BGItem> BGQueue; BGQueue queue_; + LockTable locks_; }; } // namespace leveldb_env diff --git a/chromium/third_party/leveldatabase/env_chromium_unittest.cc b/chromium/third_party/leveldatabase/env_chromium_unittest.cc index 0789b3c6337..05669f1be01 100644 --- a/chromium/third_party/leveldatabase/env_chromium_unittest.cc +++ b/chromium/third_party/leveldatabase/env_chromium_unittest.cc @@ -2,15 +2,21 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
+#include "base/file_util.h" +#include "base/files/file_enumerator.h" #include "base/files/file_path.h" #include "base/files/scoped_temp_dir.h" #include "base/test/test_suite.h" #include "env_chromium.h" #include "testing/gtest/include/gtest/gtest.h" +#include "third_party/leveldatabase/env_idb.h" +#include "third_party/leveldatabase/src/include/leveldb/db.h" using namespace leveldb_env; using namespace leveldb; +#define FPL FILE_PATH_LITERAL + TEST(ErrorEncoding, OnlyAMethod) { const MethodID in_method = kSequentialFileRead; const Status s = MakeIOError("Somefile.txt", "message", in_method); @@ -82,9 +88,10 @@ TEST(ChromiumEnv, DirectorySyncing) { std::string manifest_file_name = FilePathToString(dir_path.Append(FILE_PATH_LITERAL("MANIFEST-001"))); - WritableFile* manifest_file; - Status s = env.NewWritableFile(manifest_file_name, &manifest_file); + WritableFile* manifest_file_ptr; + Status s = env.NewWritableFile(manifest_file_name, &manifest_file_ptr); EXPECT_TRUE(s.ok()); + scoped_ptr<WritableFile> manifest_file(manifest_file_ptr); manifest_file->Append(data); EXPECT_EQ(0, env.directory_syncs()); manifest_file->Append(data); @@ -92,9 +99,10 @@ TEST(ChromiumEnv, DirectorySyncing) { std::string sst_file_name = FilePathToString(dir_path.Append(FILE_PATH_LITERAL("000003.sst"))); - WritableFile* sst_file; - s = env.NewWritableFile(sst_file_name, &sst_file); + WritableFile* sst_file_ptr; + s = env.NewWritableFile(sst_file_name, &sst_file_ptr); EXPECT_TRUE(s.ok()); + scoped_ptr<WritableFile> sst_file(sst_file_ptr); sst_file->Append(data); EXPECT_EQ(0, env.directory_syncs()); @@ -104,4 +112,115 @@ TEST(ChromiumEnv, DirectorySyncing) { EXPECT_EQ(1, env.directory_syncs()); } +int CountFilesWithExtension(const base::FilePath& dir, + const base::FilePath::StringType& extension) { + int matching_files = 0; + base::FileEnumerator dir_reader( + dir, false, base::FileEnumerator::FILES); + for (base::FilePath fname = dir_reader.Next(); !fname.empty(); + fname = 
dir_reader.Next()) { + if (fname.MatchesExtension(extension)) + matching_files++; + } + return matching_files; +} + +bool GetFirstLDBFile(const base::FilePath& dir, base::FilePath* ldb_file) { + base::FileEnumerator dir_reader( + dir, false, base::FileEnumerator::FILES); + for (base::FilePath fname = dir_reader.Next(); !fname.empty(); + fname = dir_reader.Next()) { + if (fname.MatchesExtension(FPL(".ldb"))) { + *ldb_file = fname; + return true; + } + } + return false; +} + +TEST(ChromiumEnv, BackupTables) { + Options options; + options.create_if_missing = true; + options.env = IDBEnv(); + + base::ScopedTempDir scoped_temp_dir; + scoped_temp_dir.CreateUniqueTempDir(); + base::FilePath dir = scoped_temp_dir.path(); + + DB* db; + Status status = DB::Open(options, dir.AsUTF8Unsafe(), &db); + EXPECT_TRUE(status.ok()) << status.ToString(); + status = db->Put(WriteOptions(), "key", "value"); + EXPECT_TRUE(status.ok()) << status.ToString(); + Slice a = "a"; + Slice z = "z"; + db->CompactRange(&a, &z); + int ldb_files = CountFilesWithExtension(dir, FPL(".ldb")); + int bak_files = CountFilesWithExtension(dir, FPL(".bak")); + EXPECT_GT(ldb_files, 0); + EXPECT_EQ(ldb_files, bak_files); + base::FilePath ldb_file; + EXPECT_TRUE(GetFirstLDBFile(dir, &ldb_file)); + delete db; + EXPECT_TRUE(base::DeleteFile(ldb_file, false)); + EXPECT_EQ(ldb_files - 1, CountFilesWithExtension(dir, FPL(".ldb"))); + + // The ldb file deleted above should be restored in Open. + status = leveldb::DB::Open(options, dir.AsUTF8Unsafe(), &db); + EXPECT_TRUE(status.ok()) << status.ToString(); + std::string value; + status = db->Get(ReadOptions(), "key", &value); + EXPECT_TRUE(status.ok()) << status.ToString(); + EXPECT_EQ("value", value); + delete db; + + // Ensure that deleting an ldb file also deletes its backup. 
+ int orig_ldb_files = CountFilesWithExtension(dir, FPL(".ldb")); + int orig_bak_files = CountFilesWithExtension(dir, FPL(".bak")); + EXPECT_GT(ldb_files, 0); + EXPECT_EQ(ldb_files, bak_files); + EXPECT_TRUE(GetFirstLDBFile(dir, &ldb_file)); + options.env->DeleteFile(ldb_file.AsUTF8Unsafe()); + ldb_files = CountFilesWithExtension(dir, FPL(".ldb")); + bak_files = CountFilesWithExtension(dir, FPL(".bak")); + EXPECT_EQ(orig_ldb_files - 1, ldb_files); + EXPECT_EQ(bak_files, ldb_files); +} + +TEST(ChromiumEnv, GetChildrenEmptyDir) { + base::ScopedTempDir scoped_temp_dir; + scoped_temp_dir.CreateUniqueTempDir(); + base::FilePath dir = scoped_temp_dir.path(); + + Env* env = IDBEnv(); + std::vector<std::string> result; + leveldb::Status status = env->GetChildren(dir.AsUTF8Unsafe(), &result); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(0, result.size()); +} + +TEST(ChromiumEnv, GetChildrenPriorResults) { + base::ScopedTempDir scoped_temp_dir; + scoped_temp_dir.CreateUniqueTempDir(); + base::FilePath dir = scoped_temp_dir.path(); + + base::FilePath new_file_dir = dir.Append(FPL("tmp_file")); + FILE* f = fopen(new_file_dir.AsUTF8Unsafe().c_str(), "w"); + if (f) { + fputs("Temp file contents", f); + fclose(f); + } + + Env* env = IDBEnv(); + std::vector<std::string> result; + leveldb::Status status = env->GetChildren(dir.AsUTF8Unsafe(), &result); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(1, result.size()); + + // And a second time should also return one result + status = env->GetChildren(dir.AsUTF8Unsafe(), &result); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(1, result.size()); +} + int main(int argc, char** argv) { return base::TestSuite(argc, argv).Run(); } diff --git a/chromium/third_party/leveldatabase/leveldatabase.gyp b/chromium/third_party/leveldatabase/leveldatabase.gyp index 782b678b08e..531f9f642be 100644 --- a/chromium/third_party/leveldatabase/leveldatabase.gyp +++ b/chromium/third_party/leveldatabase/leveldatabase.gyp @@ -25,11 +25,6 @@ 'src/include/', ], 'conditions': [ - 
['OS == "win"', { - 'include_dirs': [ - 'src/port/win', - ], - }], ['use_snappy', { 'defines': [ 'USE_SNAPPY=1', @@ -61,13 +56,6 @@ 'src/', '.', ], - 'conditions': [ - ['OS == "win"', { - 'include_dirs': [ - 'src/port/win', - ], - }], - ], }, # Patch posted for upstream, can be removed once that's landed and # rolled into Chromium. diff --git a/chromium/third_party/leveldatabase/src/Makefile b/chromium/third_party/leveldatabase/src/Makefile index 26de8c28ed2..344ff2972a5 100644 --- a/chromium/third_party/leveldatabase/src/Makefile +++ b/chromium/third_party/leveldatabase/src/Makefile @@ -72,7 +72,7 @@ SHARED = $(SHARED1) else # Update db.h if you change these. SHARED_MAJOR = 1 -SHARED_MINOR = 14 +SHARED_MINOR = 15 SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT) SHARED2 = $(SHARED1).$(SHARED_MAJOR) SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR) diff --git a/chromium/third_party/leveldatabase/src/build_detect_platform b/chromium/third_party/leveldatabase/src/build_detect_platform index d50869d0c21..6e59c6f8fba 100644 --- a/chromium/third_party/leveldatabase/src/build_detect_platform +++ b/chromium/third_party/leveldatabase/src/build_detect_platform @@ -131,6 +131,16 @@ case "$TARGET_OS" in # man ld: +h internal_name PLATFORM_SHARED_LDFLAGS="-shared -Wl,+h -Wl," ;; + IOS) + PLATFORM=IOS + COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX" + [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd` + PORT_FILE=port/port_posix.cc + PLATFORM_SHARED_EXT= + PLATFORM_SHARED_LDFLAGS= + PLATFORM_SHARED_CFLAGS= + PLATFORM_SHARED_VERSIONED= + ;; *) echo "Unknown platform!" 
>&2 exit 1 diff --git a/chromium/third_party/leveldatabase/src/db/corruption_test.cc b/chromium/third_party/leveldatabase/src/db/corruption_test.cc index b37ffdfe645..96afc68913c 100644 --- a/chromium/third_party/leveldatabase/src/db/corruption_test.cc +++ b/chromium/third_party/leveldatabase/src/db/corruption_test.cc @@ -75,7 +75,13 @@ class CorruptionTest { Slice key = Key(i, &key_space); batch.Clear(); batch.Put(key, Value(i, &value_space)); - ASSERT_OK(db_->Write(WriteOptions(), &batch)); + WriteOptions options; + // Corrupt() doesn't work without this sync on windows; stat reports 0 for + // the file size. + if (i == n - 1) { + options.sync = true; + } + ASSERT_OK(db_->Write(options, &batch)); } } @@ -125,7 +131,7 @@ class CorruptionTest { FileType type; std::string fname; int picked_number = -1; - for (int i = 0; i < filenames.size(); i++) { + for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type) && type == filetype && int(number) > picked_number) { // Pick latest file @@ -238,6 +244,22 @@ TEST(CorruptionTest, TableFile) { Check(90, 99); } +TEST(CorruptionTest, TableFileRepair) { + options_.block_size = 2 * kValueSize; // Limit scope of corruption + options_.paranoid_checks = true; + Reopen(); + Build(100); + DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); + dbi->TEST_CompactMemTable(); + dbi->TEST_CompactRange(0, NULL, NULL); + dbi->TEST_CompactRange(1, NULL, NULL); + + Corrupt(kTableFile, 100, 1); + RepairDB(); + Reopen(); + Check(95, 99); +} + TEST(CorruptionTest, TableFileIndexData) { Build(10000); // Enough to build multiple Tables DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); diff --git a/chromium/third_party/leveldatabase/src/db/db_bench.cc b/chromium/third_party/leveldatabase/src/db/db_bench.cc index 7abdf87587d..fc46d89693e 100644 --- a/chromium/third_party/leveldatabase/src/db/db_bench.cc +++ b/chromium/third_party/leveldatabase/src/db/db_bench.cc @@ -128,7 +128,7 @@ class RandomGenerator { pos_ = 0; } - Slice 
Generate(int len) { + Slice Generate(size_t len) { if (pos_ + len > data_.size()) { pos_ = 0; assert(len < data_.size()); @@ -139,11 +139,11 @@ class RandomGenerator { }; static Slice TrimSpace(Slice s) { - int start = 0; + size_t start = 0; while (start < s.size() && isspace(s[start])) { start++; } - int limit = s.size(); + size_t limit = s.size(); while (limit > start && isspace(s[limit-1])) { limit--; } @@ -399,7 +399,7 @@ class Benchmark { heap_counter_(0) { std::vector<std::string> files; Env::Default()->GetChildren(FLAGS_db, &files); - for (int i = 0; i < files.size(); i++) { + for (size_t i = 0; i < files.size(); i++) { if (Slice(files[i]).starts_with("heap-")) { Env::Default()->DeleteFile(std::string(FLAGS_db) + "/" + files[i]); } diff --git a/chromium/third_party/leveldatabase/src/db/db_impl.cc b/chromium/third_party/leveldatabase/src/db/db_impl.cc index fa1351038bc..faf5e7d7ba2 100644 --- a/chromium/third_party/leveldatabase/src/db/db_impl.cc +++ b/chromium/third_party/leveldatabase/src/db/db_impl.cc @@ -133,8 +133,7 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname) seed_(0), tmp_batch_(new WriteBatch), bg_compaction_scheduled_(false), - manual_compaction_(NULL), - consecutive_compaction_errors_(0) { + manual_compaction_(NULL) { mem_->Ref(); has_imm_.Release_Store(NULL); @@ -217,6 +216,12 @@ void DBImpl::MaybeIgnoreError(Status* s) const { } void DBImpl::DeleteObsoleteFiles() { + if (!bg_error_.ok()) { + // After a background error, we don't know whether a new version may + // or may not have been committed, so we cannot safely garbage collect. 
+ return; + } + // Make a set of all of the live files std::set<uint64_t> live = pending_outputs_; versions_->AddLiveFiles(&live); @@ -495,7 +500,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit, return s; } -Status DBImpl::CompactMemTable() { +void DBImpl::CompactMemTable() { mutex_.AssertHeld(); assert(imm_ != NULL); @@ -523,9 +528,9 @@ Status DBImpl::CompactMemTable() { imm_ = NULL; has_imm_.Release_Store(NULL); DeleteObsoleteFiles(); + } else { + RecordBackgroundError(s); } - - return s; } void DBImpl::CompactRange(const Slice* begin, const Slice* end) { @@ -568,16 +573,18 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) { } MutexLock l(&mutex_); - while (!manual.done) { - while (manual_compaction_ != NULL) { - bg_cv_.Wait(); - } - manual_compaction_ = &manual; - MaybeScheduleCompaction(); - while (manual_compaction_ == &manual) { + while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) { + if (manual_compaction_ == NULL) { // Idle + manual_compaction_ = &manual; + MaybeScheduleCompaction(); + } else { // Running either my compaction or another compaction. bg_cv_.Wait(); } } + if (manual_compaction_ == &manual) { + // Cancel my manual compaction since we aborted early for some reason. 
+ manual_compaction_ = NULL; + } } Status DBImpl::TEST_CompactMemTable() { @@ -596,12 +603,22 @@ Status DBImpl::TEST_CompactMemTable() { return s; } +void DBImpl::RecordBackgroundError(const Status& s) { + mutex_.AssertHeld(); + if (bg_error_.ok()) { + bg_error_ = s; + bg_cv_.SignalAll(); + } +} + void DBImpl::MaybeScheduleCompaction() { mutex_.AssertHeld(); if (bg_compaction_scheduled_) { // Already scheduled } else if (shutting_down_.Acquire_Load()) { // DB is being deleted; no more background compactions + } else if (!bg_error_.ok()) { + // Already got an error; no more changes } else if (imm_ == NULL && manual_compaction_ == NULL && !versions_->NeedsCompaction()) { @@ -619,30 +636,12 @@ void DBImpl::BGWork(void* db) { void DBImpl::BackgroundCall() { MutexLock l(&mutex_); assert(bg_compaction_scheduled_); - if (!shutting_down_.Acquire_Load()) { - Status s = BackgroundCompaction(); - if (s.ok()) { - // Success - consecutive_compaction_errors_ = 0; - } else if (shutting_down_.Acquire_Load()) { - // Error most likely due to shutdown; do not wait - } else { - // Wait a little bit before retrying background compaction in - // case this is an environmental problem and we do not want to - // chew up resources for failed compactions for the duration of - // the problem. - bg_cv_.SignalAll(); // In case a waiter can proceed despite the error - Log(options_.info_log, "Waiting after background compaction error: %s", - s.ToString().c_str()); - mutex_.Unlock(); - ++consecutive_compaction_errors_; - int seconds_to_sleep = 1; - for (int i = 0; i < 3 && i < consecutive_compaction_errors_ - 1; ++i) { - seconds_to_sleep *= 2; - } - env_->SleepForMicroseconds(seconds_to_sleep * 1000000); - mutex_.Lock(); - } + if (shutting_down_.Acquire_Load()) { + // No more background work when shutting down. + } else if (!bg_error_.ok()) { + // No more background work after a background error. 
+ } else { + BackgroundCompaction(); } bg_compaction_scheduled_ = false; @@ -653,11 +652,12 @@ void DBImpl::BackgroundCall() { bg_cv_.SignalAll(); } -Status DBImpl::BackgroundCompaction() { +void DBImpl::BackgroundCompaction() { mutex_.AssertHeld(); if (imm_ != NULL) { - return CompactMemTable(); + CompactMemTable(); + return; } Compaction* c; @@ -691,6 +691,9 @@ Status DBImpl::BackgroundCompaction() { c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest, f->largest); status = versions_->LogAndApply(c->edit(), &mutex_); + if (!status.ok()) { + RecordBackgroundError(status); + } VersionSet::LevelSummaryStorage tmp; Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n", static_cast<unsigned long long>(f->number), @@ -701,6 +704,9 @@ Status DBImpl::BackgroundCompaction() { } else { CompactionState* compact = new CompactionState(c); status = DoCompactionWork(compact); + if (!status.ok()) { + RecordBackgroundError(status); + } CleanupCompaction(compact); c->ReleaseInputs(); DeleteObsoleteFiles(); @@ -714,9 +720,6 @@ Status DBImpl::BackgroundCompaction() { } else { Log(options_.info_log, "Compaction error: %s", status.ToString().c_str()); - if (options_.paranoid_checks && bg_error_.ok()) { - bg_error_ = status; - } } if (is_manual) { @@ -732,7 +735,6 @@ Status DBImpl::BackgroundCompaction() { } manual_compaction_ = NULL; } - return status; } void DBImpl::CleanupCompaction(CompactionState* compact) { @@ -1002,6 +1004,9 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { if (status.ok()) { status = InstallCompactionResults(compact); } + if (!status.ok()) { + RecordBackgroundError(status); + } VersionSet::LevelSummaryStorage tmp; Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp)); @@ -1185,13 +1190,23 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { { mutex_.Unlock(); status = log_->AddRecord(WriteBatchInternal::Contents(updates)); + bool sync_error = false; if (status.ok() && 
options.sync) { status = logfile_->Sync(); + if (!status.ok()) { + sync_error = true; + } } if (status.ok()) { status = WriteBatchInternal::InsertInto(updates, mem_); } mutex_.Lock(); + if (sync_error) { + // The state of the log file is indeterminate: the log record we + // just added may or may not show up when the DB is re-opened. + // So we force the DB into a mode where all future writes fail. + RecordBackgroundError(status); + } } if (updates == tmp_batch_) tmp_batch_->Clear(); diff --git a/chromium/third_party/leveldatabase/src/db/db_impl.h b/chromium/third_party/leveldatabase/src/db/db_impl.h index 75fd30abe9a..cfc998164af 100644 --- a/chromium/third_party/leveldatabase/src/db/db_impl.h +++ b/chromium/third_party/leveldatabase/src/db/db_impl.h @@ -87,8 +87,8 @@ class DBImpl : public DB { // Compact the in-memory write buffer to disk. Switches to a new // log-file/memtable and writes a new descriptor iff successful. - Status CompactMemTable() - EXCLUSIVE_LOCKS_REQUIRED(mutex_); + // Errors are recorded in bg_error_. + void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status RecoverLogFile(uint64_t log_number, VersionEdit* edit, @@ -102,10 +102,12 @@ class DBImpl : public DB { EXCLUSIVE_LOCKS_REQUIRED(mutex_); WriteBatch* BuildBatchGroup(Writer** last_writer); + void RecordBackgroundError(const Status& s); + void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_); static void BGWork(void* db); void BackgroundCall(); - Status BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_); void CleanupCompaction(CompactionState* compact) EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status DoCompactionWork(CompactionState* compact) @@ -170,7 +172,6 @@ class DBImpl : public DB { // Have we encountered a background error in paranoid mode? Status bg_error_; - int consecutive_compaction_errors_; // Per level compaction stats. 
stats_[level] stores the stats for // compactions that produced data for the specified "level". diff --git a/chromium/third_party/leveldatabase/src/db/db_test.cc b/chromium/third_party/leveldatabase/src/db/db_test.cc index 848a0385484..280b01c14b5 100644 --- a/chromium/third_party/leveldatabase/src/db/db_test.cc +++ b/chromium/third_party/leveldatabase/src/db/db_test.cc @@ -57,8 +57,11 @@ void DelayMilliseconds(int millis) { // Special Env used to delay background operations class SpecialEnv : public EnvWrapper { public: - // sstable Sync() calls are blocked while this pointer is non-NULL. - port::AtomicPointer delay_sstable_sync_; + // sstable/log Sync() calls are blocked while this pointer is non-NULL. + port::AtomicPointer delay_data_sync_; + + // sstable/log Sync() calls return an error. + port::AtomicPointer data_sync_error_; // Simulate no-space errors while this pointer is non-NULL. port::AtomicPointer no_space_; @@ -75,11 +78,9 @@ class SpecialEnv : public EnvWrapper { bool count_random_reads_; AtomicCounter random_read_counter_; - AtomicCounter sleep_counter_; - AtomicCounter sleep_time_counter_; - explicit SpecialEnv(Env* base) : EnvWrapper(base) { - delay_sstable_sync_.Release_Store(NULL); + delay_data_sync_.Release_Store(NULL); + data_sync_error_.Release_Store(NULL); no_space_.Release_Store(NULL); non_writable_.Release_Store(NULL); count_random_reads_ = false; @@ -88,17 +89,17 @@ class SpecialEnv : public EnvWrapper { } Status NewWritableFile(const std::string& f, WritableFile** r) { - class SSTableFile : public WritableFile { + class DataFile : public WritableFile { private: SpecialEnv* env_; WritableFile* base_; public: - SSTableFile(SpecialEnv* env, WritableFile* base) + DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) { } - ~SSTableFile() { delete base_; } + ~DataFile() { delete base_; } Status Append(const Slice& data) { if (env_->no_space_.Acquire_Load() != NULL) { // Drop writes on the floor @@ -110,7 +111,10 @@ class 
SpecialEnv : public EnvWrapper { Status Close() { return base_->Close(); } Status Flush() { return base_->Flush(); } Status Sync() { - while (env_->delay_sstable_sync_.Acquire_Load() != NULL) { + if (env_->data_sync_error_.Acquire_Load() != NULL) { + return Status::IOError("simulated data sync error"); + } + while (env_->delay_data_sync_.Acquire_Load() != NULL) { DelayMilliseconds(100); } return base_->Sync(); @@ -147,8 +151,9 @@ class SpecialEnv : public EnvWrapper { Status s = target()->NewWritableFile(f, r); if (s.ok()) { - if (strstr(f.c_str(), ".ldb") != NULL) { - *r = new SSTableFile(this, *r); + if (strstr(f.c_str(), ".ldb") != NULL || + strstr(f.c_str(), ".log") != NULL) { + *r = new DataFile(this, *r); } else if (strstr(f.c_str(), "MANIFEST") != NULL) { *r = new ManifestFile(this, *r); } @@ -179,12 +184,6 @@ class SpecialEnv : public EnvWrapper { } return s; } - - virtual void SleepForMicroseconds(int micros) { - sleep_counter_.Increment(); - sleep_time_counter_.IncrementBy(micros); - } - }; class DBTest { @@ -322,7 +321,7 @@ class DBTest { } // Check reverse iteration results are the reverse of forward results - int matched = 0; + size_t matched = 0; for (iter->SeekToLast(); iter->Valid(); iter->Prev()) { ASSERT_LT(matched, forward.size()); ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]); @@ -543,11 +542,11 @@ TEST(DBTest, GetFromImmutableLayer) { ASSERT_OK(Put("foo", "v1")); ASSERT_EQ("v1", Get("foo")); - env_->delay_sstable_sync_.Release_Store(env_); // Block sync calls + env_->delay_data_sync_.Release_Store(env_); // Block sync calls Put("k1", std::string(100000, 'x')); // Fill memtable Put("k2", std::string(100000, 'y')); // Trigger compaction ASSERT_EQ("v1", Get("foo")); - env_->delay_sstable_sync_.Release_Store(NULL); // Release sync calls + env_->delay_data_sync_.Release_Store(NULL); // Release sync calls } while (ChangeOptions()); } @@ -1534,41 +1533,13 @@ TEST(DBTest, NoSpace) { Compact("a", "z"); const int num_files = 
CountFiles(); env_->no_space_.Release_Store(env_); // Force out-of-space errors - env_->sleep_counter_.Reset(); - for (int i = 0; i < 5; i++) { + for (int i = 0; i < 10; i++) { for (int level = 0; level < config::kNumLevels-1; level++) { dbfull()->TEST_CompactRange(level, NULL, NULL); } } env_->no_space_.Release_Store(NULL); ASSERT_LT(CountFiles(), num_files + 3); - - // Check that compaction attempts slept after errors - ASSERT_GE(env_->sleep_counter_.Read(), 5); -} - -TEST(DBTest, ExponentialBackoff) { - Options options = CurrentOptions(); - options.env = env_; - Reopen(&options); - - ASSERT_OK(Put("foo", "v1")); - ASSERT_EQ("v1", Get("foo")); - Compact("a", "z"); - env_->non_writable_.Release_Store(env_); // Force errors for new files - env_->sleep_counter_.Reset(); - env_->sleep_time_counter_.Reset(); - for (int i = 0; i < 5; i++) { - dbfull()->TEST_CompactRange(2, NULL, NULL); - } - env_->non_writable_.Release_Store(NULL); - - // Wait for compaction to finish - DelayMilliseconds(1000); - - ASSERT_GE(env_->sleep_counter_.Read(), 5); - ASSERT_LT(env_->sleep_counter_.Read(), 10); - ASSERT_GE(env_->sleep_time_counter_.Read(), 10e6); } TEST(DBTest, NonWritableFileSystem) { @@ -1591,6 +1562,37 @@ TEST(DBTest, NonWritableFileSystem) { env_->non_writable_.Release_Store(NULL); } +TEST(DBTest, WriteSyncError) { + // Check that log sync errors cause the DB to disallow future writes. 
+ + // (a) Cause log sync calls to fail + Options options = CurrentOptions(); + options.env = env_; + Reopen(&options); + env_->data_sync_error_.Release_Store(env_); + + // (b) Normal write should succeed + WriteOptions w; + ASSERT_OK(db_->Put(w, "k1", "v1")); + ASSERT_EQ("v1", Get("k1")); + + // (c) Do a sync write; should fail + w.sync = true; + ASSERT_TRUE(!db_->Put(w, "k2", "v2").ok()); + ASSERT_EQ("v1", Get("k1")); + ASSERT_EQ("NOT_FOUND", Get("k2")); + + // (d) make sync behave normally + env_->data_sync_error_.Release_Store(NULL); + + // (e) Do a non-sync write; should fail + w.sync = false; + ASSERT_TRUE(!db_->Put(w, "k3", "v3").ok()); + ASSERT_EQ("v1", Get("k1")); + ASSERT_EQ("NOT_FOUND", Get("k2")); + ASSERT_EQ("NOT_FOUND", Get("k3")); +} + TEST(DBTest, ManifestWriteError) { // Test for the following problem: // (a) Compaction produces file F @@ -1697,7 +1699,7 @@ TEST(DBTest, BloomFilter) { dbfull()->TEST_CompactMemTable(); // Prevent auto compactions triggered by seeks - env_->delay_sstable_sync_.Release_Store(env_); + env_->delay_data_sync_.Release_Store(env_); // Lookup present keys. Should rarely read from small sstable. 
env_->random_read_counter_.Reset(); @@ -1718,7 +1720,7 @@ TEST(DBTest, BloomFilter) { fprintf(stderr, "%d missing => %d reads\n", N, reads); ASSERT_LE(reads, 3*N/100); - env_->delay_sstable_sync_.Release_Store(NULL); + env_->delay_data_sync_.Release_Store(NULL); Close(); delete options.block_cache; delete options.filter_policy; @@ -1778,7 +1780,7 @@ static void MTThreadBody(void* arg) { ASSERT_EQ(k, key); ASSERT_GE(w, 0); ASSERT_LT(w, kNumThreads); - ASSERT_LE(c, reinterpret_cast<uintptr_t>( + ASSERT_LE(static_cast<uintptr_t>(c), reinterpret_cast<uintptr_t>( t->state->counter[w].Acquire_Load())); } } diff --git a/chromium/third_party/leveldatabase/src/db/repair.cc b/chromium/third_party/leveldatabase/src/db/repair.cc index dc93fb87fcd..96c9b37af14 100644 --- a/chromium/third_party/leveldatabase/src/db/repair.cc +++ b/chromium/third_party/leveldatabase/src/db/repair.cc @@ -244,68 +244,133 @@ class Repairer { void ExtractMetaData() { std::vector<TableInfo> kept; for (size_t i = 0; i < table_numbers_.size(); i++) { - TableInfo t; - t.meta.number = table_numbers_[i]; - Status status = ScanTable(&t); - if (!status.ok()) { - std::string fname = TableFileName(dbname_, table_numbers_[i]); - Log(options_.info_log, "Table #%llu: ignoring %s", - (unsigned long long) table_numbers_[i], - status.ToString().c_str()); - ArchiveFile(fname); - } else { - tables_.push_back(t); - } + ScanTable(table_numbers_[i]); } } - Status ScanTable(TableInfo* t) { - std::string fname = TableFileName(dbname_, t->meta.number); - int counter = 0; - Status status = env_->GetFileSize(fname, &t->meta.file_size); + Iterator* NewTableIterator(const FileMetaData& meta) { + // Same as compaction iterators: if paranoid_checks are on, turn + // on checksum verification. 
+ ReadOptions r; + r.verify_checksums = options_.paranoid_checks; + return table_cache_->NewIterator(r, meta.number, meta.file_size); + } + + void ScanTable(uint64_t number) { + TableInfo t; + t.meta.number = number; + std::string fname = TableFileName(dbname_, number); + Status status = env_->GetFileSize(fname, &t.meta.file_size); if (!status.ok()) { - fname = SSTTableFileName(dbname_, t->meta.number); - Status s2 = env_->GetFileSize(fname, &t->meta.file_size); - if (s2.ok()) + // Try alternate file name. + fname = SSTTableFileName(dbname_, number); + Status s2 = env_->GetFileSize(fname, &t.meta.file_size); + if (s2.ok()) { status = Status::OK(); + } + } + if (!status.ok()) { + ArchiveFile(TableFileName(dbname_, number)); + ArchiveFile(SSTTableFileName(dbname_, number)); + Log(options_.info_log, "Table #%llu: dropped: %s", + (unsigned long long) t.meta.number, + status.ToString().c_str()); + return; } - if (status.ok()) { - Iterator* iter = table_cache_->NewIterator( - ReadOptions(), t->meta.number, t->meta.file_size); - bool empty = true; - ParsedInternalKey parsed; - t->max_sequence = 0; - for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - Slice key = iter->key(); - if (!ParseInternalKey(key, &parsed)) { - Log(options_.info_log, "Table #%llu: unparsable key %s", - (unsigned long long) t->meta.number, - EscapeString(key).c_str()); - continue; - } - counter++; - if (empty) { - empty = false; - t->meta.smallest.DecodeFrom(key); - } - t->meta.largest.DecodeFrom(key); - if (parsed.sequence > t->max_sequence) { - t->max_sequence = parsed.sequence; - } + // Extract metadata by scanning through table. 
+ int counter = 0; + Iterator* iter = NewTableIterator(t.meta); + bool empty = true; + ParsedInternalKey parsed; + t.max_sequence = 0; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + Slice key = iter->key(); + if (!ParseInternalKey(key, &parsed)) { + Log(options_.info_log, "Table #%llu: unparsable key %s", + (unsigned long long) t.meta.number, + EscapeString(key).c_str()); + continue; + } + + counter++; + if (empty) { + empty = false; + t.meta.smallest.DecodeFrom(key); } - if (!iter->status().ok()) { - status = iter->status(); + t.meta.largest.DecodeFrom(key); + if (parsed.sequence > t.max_sequence) { + t.max_sequence = parsed.sequence; } - delete iter; } - // If there was trouble opening an .sst file this will report that the .ldb - // file was not found, which is kind of lame but shouldn't happen often. + if (!iter->status().ok()) { + status = iter->status(); + } + delete iter; Log(options_.info_log, "Table #%llu: %d entries %s", - (unsigned long long) t->meta.number, + (unsigned long long) t.meta.number, counter, status.ToString().c_str()); - return status; + + if (status.ok()) { + tables_.push_back(t); + } else { + RepairTable(fname, t); // RepairTable archives input file. + } + } + + void RepairTable(const std::string& src, TableInfo t) { + // We will copy src contents to a new table and then rename the + // new table over the source. + + // Create builder. + std::string copy = TableFileName(dbname_, next_file_number_++); + WritableFile* file; + Status s = env_->NewWritableFile(copy, &file); + if (!s.ok()) { + return; + } + TableBuilder* builder = new TableBuilder(options_, file); + + // Copy data. 
+ Iterator* iter = NewTableIterator(t.meta); + int counter = 0; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + builder->Add(iter->key(), iter->value()); + counter++; + } + delete iter; + + ArchiveFile(src); + if (counter == 0) { + builder->Abandon(); // Nothing to save + } else { + s = builder->Finish(); + if (s.ok()) { + t.meta.file_size = builder->FileSize(); + } + } + delete builder; + builder = NULL; + + if (s.ok()) { + s = file->Close(); + } + delete file; + file = NULL; + + if (counter > 0 && s.ok()) { + std::string orig = TableFileName(dbname_, t.meta.number); + s = env_->RenameFile(copy, orig); + if (s.ok()) { + Log(options_.info_log, "Table #%llu: %d entries repaired", + (unsigned long long) t.meta.number, counter); + tables_.push_back(t); + } + } + if (!s.ok()) { + env_->DeleteFile(copy); + } } Status WriteDescriptor() { diff --git a/chromium/third_party/leveldatabase/src/db/version_set.cc b/chromium/third_party/leveldatabase/src/db/version_set.cc index 66d73be71fb..517edd3b18b 100644 --- a/chromium/third_party/leveldatabase/src/db/version_set.cc +++ b/chromium/third_party/leveldatabase/src/db/version_set.cc @@ -876,12 +876,6 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { } if (!s.ok()) { Log(options_->info_log, "MANIFEST write: %s\n", s.ToString().c_str()); - if (ManifestContains(record)) { - Log(options_->info_log, - "MANIFEST contains log record despite error; advancing to new " - "version to prevent mismatch between in-memory and logged state"); - s = Status::OK(); - } } } @@ -889,8 +883,6 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { // new CURRENT file that points to it. if (s.ok() && !new_manifest_file.empty()) { s = SetCurrentFile(env_, dbname_, manifest_file_number_); - // No need to double-check MANIFEST in case of error since it - // will be discarded below. 
} mu->Lock(); @@ -1124,31 +1116,6 @@ const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const { return scratch->buffer; } -// Return true iff the manifest contains the specified record. -bool VersionSet::ManifestContains(const std::string& record) const { - std::string fname = DescriptorFileName(dbname_, manifest_file_number_); - Log(options_->info_log, "ManifestContains: checking %s\n", fname.c_str()); - SequentialFile* file = NULL; - Status s = env_->NewSequentialFile(fname, &file); - if (!s.ok()) { - Log(options_->info_log, "ManifestContains: %s\n", s.ToString().c_str()); - return false; - } - log::Reader reader(file, NULL, true/*checksum*/, 0); - Slice r; - std::string scratch; - bool result = false; - while (reader.ReadRecord(&r, &scratch)) { - if (r == Slice(record)) { - result = true; - break; - } - } - delete file; - Log(options_->info_log, "ManifestContains: result = %d\n", result ? 1 : 0); - return result; -} - uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) { uint64_t result = 0; for (int level = 0; level < config::kNumLevels; level++) { diff --git a/chromium/third_party/leveldatabase/src/db/version_set.h b/chromium/third_party/leveldatabase/src/db/version_set.h index 20de0e26291..8dc14b8e01c 100644 --- a/chromium/third_party/leveldatabase/src/db/version_set.h +++ b/chromium/third_party/leveldatabase/src/db/version_set.h @@ -292,8 +292,6 @@ class VersionSet { void AppendVersion(Version* v); - bool ManifestContains(const std::string& record) const; - Env* const env_; const std::string dbname_; const Options* const options_; diff --git a/chromium/third_party/leveldatabase/src/include/leveldb/db.h b/chromium/third_party/leveldatabase/src/include/leveldb/db.h index 259a81f4155..5ffb29d5264 100644 --- a/chromium/third_party/leveldatabase/src/include/leveldb/db.h +++ b/chromium/third_party/leveldatabase/src/include/leveldb/db.h @@ -14,7 +14,7 @@ namespace leveldb { // Update Makefile if you change these static 
const int kMajorVersion = 1; -static const int kMinorVersion = 14; +static const int kMinorVersion = 15; struct Options; struct ReadOptions; diff --git a/chromium/third_party/leveldatabase/src/include/leveldb/env.h b/chromium/third_party/leveldatabase/src/include/leveldb/env.h index fa32289f581..b2072d02c1c 100644 --- a/chromium/third_party/leveldatabase/src/include/leveldb/env.h +++ b/chromium/third_party/leveldatabase/src/include/leveldb/env.h @@ -13,9 +13,9 @@ #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ #define STORAGE_LEVELDB_INCLUDE_ENV_H_ -#include <cstdarg> #include <string> #include <vector> +#include <stdarg.h> #include <stdint.h> #include "leveldb/status.h" diff --git a/chromium/third_party/leveldatabase/src/port/atomic_pointer.h b/chromium/third_party/leveldatabase/src/port/atomic_pointer.h index e17bf435eab..a9866b2302f 100644 --- a/chromium/third_party/leveldatabase/src/port/atomic_pointer.h +++ b/chromium/third_party/leveldatabase/src/port/atomic_pointer.h @@ -50,6 +50,13 @@ namespace port { // http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx #define LEVELDB_HAVE_MEMORY_BARRIER +// Mac OS +#elif defined(OS_MACOSX) +inline void MemoryBarrier() { + OSMemoryBarrier(); +} +#define LEVELDB_HAVE_MEMORY_BARRIER + // Gcc on x86 #elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { @@ -68,13 +75,6 @@ inline void MemoryBarrier() { } #define LEVELDB_HAVE_MEMORY_BARRIER -// Mac OS -#elif defined(OS_MACOSX) -inline void MemoryBarrier() { - OSMemoryBarrier(); -} -#define LEVELDB_HAVE_MEMORY_BARRIER - // ARM Linux #elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__) typedef void (*LinuxKernelMemoryBarrierFunc)(void); diff --git a/chromium/third_party/leveldatabase/src/table/filter_block_test.cc b/chromium/third_party/leveldatabase/src/table/filter_block_test.cc index 3a2a07cf53c..8c4a4741f22 100644 --- a/chromium/third_party/leveldatabase/src/table/filter_block_test.cc +++ 
b/chromium/third_party/leveldatabase/src/table/filter_block_test.cc @@ -29,7 +29,7 @@ class TestHashFilter : public FilterPolicy { virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const { uint32_t h = Hash(key.data(), key.size(), 1); - for (int i = 0; i + 4 <= filter.size(); i += 4) { + for (size_t i = 0; i + 4 <= filter.size(); i += 4) { if (h == DecodeFixed32(filter.data() + i)) { return true; } diff --git a/chromium/third_party/leveldatabase/src/util/arena.h b/chromium/third_party/leveldatabase/src/util/arena.h index 8f7dde226c4..73bbf1cb9bf 100644 --- a/chromium/third_party/leveldatabase/src/util/arena.h +++ b/chromium/third_party/leveldatabase/src/util/arena.h @@ -5,9 +5,9 @@ #ifndef STORAGE_LEVELDB_UTIL_ARENA_H_ #define STORAGE_LEVELDB_UTIL_ARENA_H_ -#include <cstddef> #include <vector> #include <assert.h> +#include <stddef.h> #include <stdint.h> namespace leveldb { diff --git a/chromium/third_party/leveldatabase/src/util/arena_test.cc b/chromium/third_party/leveldatabase/src/util/arena_test.cc index 63d17780345..58e870ec445 100644 --- a/chromium/third_party/leveldatabase/src/util/arena_test.cc +++ b/chromium/third_party/leveldatabase/src/util/arena_test.cc @@ -40,7 +40,7 @@ TEST(ArenaTest, Simple) { r = arena.Allocate(s); } - for (int b = 0; b < s; b++) { + for (size_t b = 0; b < s; b++) { // Fill the "i"th allocation with a known bit pattern r[b] = i % 256; } @@ -51,10 +51,10 @@ TEST(ArenaTest, Simple) { ASSERT_LE(arena.MemoryUsage(), bytes * 1.10); } } - for (int i = 0; i < allocated.size(); i++) { + for (size_t i = 0; i < allocated.size(); i++) { size_t num_bytes = allocated[i].first; const char* p = allocated[i].second; - for (int b = 0; b < num_bytes; b++) { + for (size_t b = 0; b < num_bytes; b++) { // Check the "i"th allocation for the known bit pattern ASSERT_EQ(int(p[b]) & 0xff, i % 256); } diff --git a/chromium/third_party/leveldatabase/src/util/bloom_test.cc b/chromium/third_party/leveldatabase/src/util/bloom_test.cc index 
0bf8e8d6ebe..77fb1b31595 100644 --- a/chromium/third_party/leveldatabase/src/util/bloom_test.cc +++ b/chromium/third_party/leveldatabase/src/util/bloom_test.cc @@ -126,7 +126,8 @@ TEST(BloomTest, VaryingLengths) { } Build(); - ASSERT_LE(FilterSize(), (length * 10 / 8) + 40) << length; + ASSERT_LE(FilterSize(), static_cast<size_t>((length * 10 / 8) + 40)) + << length; // All added keys must match for (int i = 0; i < length; i++) { diff --git a/chromium/third_party/leveldatabase/src/util/coding_test.cc b/chromium/third_party/leveldatabase/src/util/coding_test.cc index fb5726e3352..521541ea61b 100644 --- a/chromium/third_party/leveldatabase/src/util/coding_test.cc +++ b/chromium/third_party/leveldatabase/src/util/coding_test.cc @@ -112,13 +112,13 @@ TEST(Coding, Varint64) { } std::string s; - for (int i = 0; i < values.size(); i++) { + for (size_t i = 0; i < values.size(); i++) { PutVarint64(&s, values[i]); } const char* p = s.data(); const char* limit = p + s.size(); - for (int i = 0; i < values.size(); i++) { + for (size_t i = 0; i < values.size(); i++) { ASSERT_TRUE(p < limit); uint64_t actual; const char* start = p; @@ -143,7 +143,7 @@ TEST(Coding, Varint32Truncation) { std::string s; PutVarint32(&s, large_value); uint32_t result; - for (int len = 0; len < s.size() - 1; len++) { + for (size_t len = 0; len < s.size() - 1; len++) { ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL); } ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL); @@ -162,7 +162,7 @@ TEST(Coding, Varint64Truncation) { std::string s; PutVarint64(&s, large_value); uint64_t result; - for (int len = 0; len < s.size() - 1; len++) { + for (size_t len = 0; len < s.size() - 1; len++) { ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL); } ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL); diff --git a/chromium/third_party/leveldatabase/src/util/env_posix.cc 
b/chromium/third_party/leveldatabase/src/util/env_posix.cc index 3e2925d1115..e1cbebd1b30 100644 --- a/chromium/third_party/leveldatabase/src/util/env_posix.cc +++ b/chromium/third_party/leveldatabase/src/util/env_posix.cc @@ -175,147 +175,43 @@ class PosixMmapReadableFile: public RandomAccessFile { } }; -// We preallocate up to an extra megabyte and use memcpy to append new -// data to the file. This is safe since we either properly close the -// file before reading from it, or for log files, the reading code -// knows enough to skip zero suffixes. -class PosixMmapFile : public WritableFile { +class PosixWritableFile : public WritableFile { private: std::string filename_; - int fd_; - size_t page_size_; - size_t map_size_; // How much extra memory to map at a time - char* base_; // The mapped region - char* limit_; // Limit of the mapped region - char* dst_; // Where to write next (in range [base_,limit_]) - char* last_sync_; // Where have we synced up to - uint64_t file_offset_; // Offset of base_ in file - - // Have we done an munmap of unsynced data? 
- bool pending_sync_; - - // Roundup x to a multiple of y - static size_t Roundup(size_t x, size_t y) { - return ((x + y - 1) / y) * y; - } - - size_t TruncateToPageBoundary(size_t s) { - s -= (s & (page_size_ - 1)); - assert((s % page_size_) == 0); - return s; - } - - bool UnmapCurrentRegion() { - bool result = true; - if (base_ != NULL) { - if (last_sync_ < limit_) { - // Defer syncing this data until next Sync() call, if any - pending_sync_ = true; - } - if (munmap(base_, limit_ - base_) != 0) { - result = false; - } - file_offset_ += limit_ - base_; - base_ = NULL; - limit_ = NULL; - last_sync_ = NULL; - dst_ = NULL; - - // Increase the amount we map the next time, but capped at 1MB - if (map_size_ < (1<<20)) { - map_size_ *= 2; - } - } - return result; - } - - bool MapNewRegion() { - assert(base_ == NULL); - if (ftruncate(fd_, file_offset_ + map_size_) < 0) { - return false; - } - void* ptr = mmap(NULL, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED, - fd_, file_offset_); - if (ptr == MAP_FAILED) { - return false; - } - base_ = reinterpret_cast<char*>(ptr); - limit_ = base_ + map_size_; - dst_ = base_; - last_sync_ = base_; - return true; - } + FILE* file_; public: - PosixMmapFile(const std::string& fname, int fd, size_t page_size) - : filename_(fname), - fd_(fd), - page_size_(page_size), - map_size_(Roundup(65536, page_size)), - base_(NULL), - limit_(NULL), - dst_(NULL), - last_sync_(NULL), - file_offset_(0), - pending_sync_(false) { - assert((page_size & (page_size - 1)) == 0); - } - - - ~PosixMmapFile() { - if (fd_ >= 0) { - PosixMmapFile::Close(); + PosixWritableFile(const std::string& fname, FILE* f) + : filename_(fname), file_(f) { } + + ~PosixWritableFile() { + if (file_ != NULL) { + // Ignoring any potential errors + fclose(file_); } } virtual Status Append(const Slice& data) { - const char* src = data.data(); - size_t left = data.size(); - while (left > 0) { - assert(base_ <= dst_); - assert(dst_ <= limit_); - size_t avail = limit_ - dst_; - if (avail 
== 0) { - if (!UnmapCurrentRegion() || - !MapNewRegion()) { - return IOError(filename_, errno); - } - } - - size_t n = (left <= avail) ? left : avail; - memcpy(dst_, src, n); - dst_ += n; - src += n; - left -= n; + size_t r = fwrite_unlocked(data.data(), 1, data.size(), file_); + if (r != data.size()) { + return IOError(filename_, errno); } return Status::OK(); } virtual Status Close() { - Status s; - size_t unused = limit_ - dst_; - if (!UnmapCurrentRegion()) { - s = IOError(filename_, errno); - } else if (unused > 0) { - // Trim the extra space at the end of the file - if (ftruncate(fd_, file_offset_ - unused) < 0) { - s = IOError(filename_, errno); - } - } - - if (close(fd_) < 0) { - if (s.ok()) { - s = IOError(filename_, errno); - } + Status result; + if (fclose(file_) != 0) { + result = IOError(filename_, errno); } - - fd_ = -1; - base_ = NULL; - limit_ = NULL; - return s; + file_ = NULL; + return result; } virtual Status Flush() { + if (fflush_unlocked(file_) != 0) { + return IOError(filename_, errno); + } return Status::OK(); } @@ -352,26 +248,10 @@ class PosixMmapFile : public WritableFile { if (!s.ok()) { return s; } - - if (pending_sync_) { - // Some unmapped data was not synced - pending_sync_ = false; - if (fdatasync(fd_) < 0) { - s = IOError(filename_, errno); - } + if (fflush_unlocked(file_) != 0 || + fdatasync(fileno(file_)) != 0) { + s = Status::IOError(filename_, strerror(errno)); } - - if (dst_ > last_sync_) { - // Find the beginnings of the pages that contain the first and last - // bytes to be synced. 
- size_t p1 = TruncateToPageBoundary(last_sync_ - base_); - size_t p2 = TruncateToPageBoundary(dst_ - base_ - 1); - last_sync_ = dst_; - if (msync(base_ + p1, p2 - p1 + page_size_, MS_SYNC) < 0) { - s = IOError(filename_, errno); - } - } - return s; } }; @@ -462,12 +342,12 @@ class PosixEnv : public Env { virtual Status NewWritableFile(const std::string& fname, WritableFile** result) { Status s; - const int fd = open(fname.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644); - if (fd < 0) { + FILE* f = fopen(fname.c_str(), "w"); + if (f == NULL) { *result = NULL; s = IOError(fname, errno); } else { - *result = new PosixMmapFile(fname, fd, page_size_); + *result = new PosixWritableFile(fname, f); } return s; } @@ -630,7 +510,6 @@ class PosixEnv : public Env { return NULL; } - size_t page_size_; pthread_mutex_t mu_; pthread_cond_t bgsignal_; pthread_t bgthread_; @@ -645,8 +524,7 @@ class PosixEnv : public Env { MmapLimiter mmap_limit_; }; -PosixEnv::PosixEnv() : page_size_(getpagesize()), - started_bgthread_(false) { +PosixEnv::PosixEnv() : started_bgthread_(false) { PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL)); PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL)); } diff --git a/chromium/third_party/leveldatabase/src/util/testharness.cc b/chromium/third_party/leveldatabase/src/util/testharness.cc index eb1bdd554a3..402fab34d77 100644 --- a/chromium/third_party/leveldatabase/src/util/testharness.cc +++ b/chromium/third_party/leveldatabase/src/util/testharness.cc @@ -38,7 +38,7 @@ int RunAllTests() { int num = 0; if (tests != NULL) { - for (int i = 0; i < tests->size(); i++) { + for (size_t i = 0; i < tests->size(); i++) { const Test& t = (*tests)[i]; if (matcher != NULL) { std::string name = t.base; diff --git a/chromium/third_party/leveldatabase/src/util/testutil.cc b/chromium/third_party/leveldatabase/src/util/testutil.cc index 538d09516d2..bee56bf75f1 100644 --- a/chromium/third_party/leveldatabase/src/util/testutil.cc +++ 
b/chromium/third_party/leveldatabase/src/util/testutil.cc @@ -32,7 +32,7 @@ std::string RandomKey(Random* rnd, int len) { extern Slice CompressibleString(Random* rnd, double compressed_fraction, - int len, std::string* dst) { + size_t len, std::string* dst) { int raw = static_cast<int>(len * compressed_fraction); if (raw < 1) raw = 1; std::string raw_data; diff --git a/chromium/third_party/leveldatabase/src/util/testutil.h b/chromium/third_party/leveldatabase/src/util/testutil.h index 824e655bd2c..adad3fc1eac 100644 --- a/chromium/third_party/leveldatabase/src/util/testutil.h +++ b/chromium/third_party/leveldatabase/src/util/testutil.h @@ -24,7 +24,7 @@ extern std::string RandomKey(Random* rnd, int len); // "N*compressed_fraction" bytes and return a Slice that references // the generated data. extern Slice CompressibleString(Random* rnd, double compressed_fraction, - int len, std::string* dst); + size_t len, std::string* dst); // A wrapper that allows injection of errors. class ErrorEnv : public EnvWrapper { |