-rw-r--r--  src/mongo/client/examples/mongoperf.cpp | 13
-rw-r--r--  src/mongo/db/catalog/database.h | 2
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 4
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp | 14
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h | 4
-rw-r--r--  src/mongo/db/query/query_yield.cpp | 2
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 2
-rw-r--r--  src/mongo/db/storage/kv/kv_engine.h | 2
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp | 4
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.h | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/SConscript | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp | 10
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_index.h | 12
-rw-r--r--  src/mongo/db/storage/mmap_v1/data_file.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/data_file.h | 12
-rw-r--r--  src/mongo/db/storage/mmap_v1/data_file_sync.cpp | 7
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur.cpp | 24
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur.h | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp | 8
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_recover.cpp | 78
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_recover.h | 28
-rw-r--r--  src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp | 35
-rw-r--r--  src/mongo/db/storage/mmap_v1/durable_mapped_file.h | 14
-rw-r--r--  src/mongo/db/storage/mmap_v1/extent_manager.h | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap.cpp | 93
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap.h | 114
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_posix.cpp | 53
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp | 9
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h | 8
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp | 26
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_engine.h | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp | 33
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h | 12
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_windows.cpp | 51
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/repair_database.cpp | 9
-rw-r--r--  src/mongo/db/storage/record_fetcher.h | 4
-rw-r--r--  src/mongo/db/storage/storage_engine.h | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h | 2
-rw-r--r--  src/mongo/db/write_concern.cpp | 2
-rw-r--r--  src/mongo/dbtests/SConscript | 1
-rw-r--r--  src/mongo/dbtests/mmaptests.cpp | 24
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 280
-rw-r--r--  src/mongo/util/concurrency/SConscript | 10
-rw-r--r--  src/mongo/util/concurrency/rwlock.h | 291
-rw-r--r--  src/mongo/util/concurrency/rwlockimpl.cpp | 120
-rw-r--r--  src/mongo/util/concurrency/rwlockimpl.h | 185
-rw-r--r--  src/mongo/util/concurrency/shared_mutex_win.hpp | 593
-rw-r--r--  src/mongo/util/concurrency/simplerwlock.h | 85
-rw-r--r--  src/mongo/util/processinfo.h | 2
52 files changed, 480 insertions(+), 1830 deletions(-)
diff --git a/src/mongo/client/examples/mongoperf.cpp b/src/mongo/client/examples/mongoperf.cpp
index 0e45db2ff4d..04f83044955 100644
--- a/src/mongo/client/examples/mongoperf.cpp
+++ b/src/mongo/client/examples/mongoperf.cpp
@@ -52,6 +52,7 @@
#include "mongo/util/allocator.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/processinfo.h"
+#include "mongo/util/scopeguard.h"
#include "mongo/util/time_support.h"
#include "mongo/util/timer.h"
@@ -172,7 +173,13 @@ void go() {
recSizeKB = 4;
verify(recSizeKB <= 64000 && recSizeKB > 0);
- MemoryMappedFile f;
+ auto txn = cc().makeOperationContext();
+ MemoryMappedFile f(txn.get());
+ ON_BLOCK_EXIT([&f, &txn] {
+ LockMongoFilesExclusive lock(txn.get());
+ f.close(txn.get());
+ });
+
cout << "creating test file size:";
len = options["fileSizeMB"].numberLong();
if (len == 0)
@@ -209,8 +216,8 @@ void go() {
if (o["mmf"].trueValue()) {
delete lf;
lf = 0;
- mmfFile = new MemoryMappedFile();
- mmf = (char*)mmfFile->map(fname);
+ mmfFile = new MemoryMappedFile(txn.get());
+ mmf = (char*)mmfFile->map(txn.get(), fname);
verify(mmf);
syncDelaySecs = options["syncDelay"].numberInt();
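
The mongoperf hunks above show the new MemoryMappedFile lifecycle: construction takes an OperationContext, and the file must be closed under LockMongoFilesExclusive before the destructor runs. A minimal sketch of that calling pattern (illustration only, not part of the commit; error handling and the benchmark loop are elided):

    #include "mongo/db/client.h"
    #include "mongo/db/storage/mmap_v1/mmap.h"
    #include "mongo/util/scopeguard.h"

    void mapWholeFile(const std::string& fname) {
        auto txn = mongo::cc().makeOperationContext();
        mongo::MemoryMappedFile f(txn.get());
        // ~MemoryMappedFile() now asserts isClosed(), so close under the
        // exclusive files lock before `f` leaves scope.
        ON_BLOCK_EXIT([&f, &txn] {
            mongo::LockMongoFilesExclusive lock(txn.get());
            f.close(txn.get());
        });
        void* p = f.map(txn.get(), fname.c_str());  // uasserts if the file is missing
        // ... read/write through p ...
    }
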
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index f34dcebfc72..2b405710b78 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -46,9 +46,7 @@
namespace mongo {
class Collection;
-class DataFile;
class DatabaseCatalogEntry;
-class ExtentManager;
class IndexCatalog;
class NamespaceDetails;
class OperationContext;
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index a2d4c3c2651..01d922cec02 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -150,7 +150,7 @@ public:
// Take a global IS lock to ensure the storage engine is not shutdown
Lock::GlobalLock global(txn->lockState(), MODE_IS, UINT_MAX);
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- result.append("numFiles", storageEngine->flushAllFiles(sync));
+ result.append("numFiles", storageEngine->flushAllFiles(txn, sync));
return true;
}
@@ -362,7 +362,7 @@ void FSyncLockThread::run() {
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
try {
- storageEngine->flushAllFiles(true);
+ storageEngine->flushAllFiles(&txn, true);
} catch (const std::exception& e) {
error() << "error doing flushAll: " << e.what();
fsyncCmd.threadStatus = Status(ErrorCodes::CommandFailed, e.what());
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index a2aead7cdb4..e12f38a13fe 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -85,7 +85,7 @@ public:
*/
static void ensureInitialized() {
if (!resourceIdFactory) {
- resourceIdFactory = stdx::make_unique<ResourceIdFactory>();
+ resourceIdFactory = new ResourceIdFactory();
}
}
@@ -98,14 +98,14 @@ private:
return ResourceId(RESOURCE_MUTEX, nextId++);
}
- static std::unique_ptr<ResourceIdFactory> resourceIdFactory;
+ static ResourceIdFactory* resourceIdFactory;
std::uint64_t nextId = 0;
std::vector<std::string> labels;
stdx::mutex labelsMutex;
};
-std::unique_ptr<ResourceIdFactory> ResourceIdFactory::resourceIdFactory;
+ResourceIdFactory* ResourceIdFactory::resourceIdFactory;
/**
* Guarantees `ResourceIdFactory::ensureInitialized` is called at least once during initialization.
@@ -127,6 +127,14 @@ std::string Lock::ResourceMutex::getName(ResourceId resourceId) {
return ResourceIdFactory::nameForId(resourceId);
}
+bool Lock::ResourceMutex::isExclusivelyLocked(Locker* locker) {
+ return locker->isLockHeldForMode(_rid, MODE_X);
+}
+
+bool Lock::ResourceMutex::isAtLeastReadLocked(Locker* locker) {
+ return locker->isLockHeldForMode(_rid, MODE_IS);
+}
+
Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs)
: GlobalLock(locker, lockMode, EnqueueOnly()) {
waitForLock(timeoutMs);
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index 9617bc985a0..fe8ac0f49a1 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -119,6 +119,10 @@ public:
static std::string getName(ResourceId resourceId);
+ bool isExclusivelyLocked(Locker* locker);
+
+ bool isAtLeastReadLocked(Locker* locker);
+
private:
friend class Lock::SharedLock;
friend class Lock::ExclusiveLock;
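
The two new query methods let code that guards state with a Lock::ResourceMutex assert its locking contract; the reworked LockMongoFilesShared/Exclusive helpers later in this diff use them exactly this way. A hedged sketch of the call shape (the mutex label and function are made up for illustration):

    #include "mongo/db/concurrency/d_concurrency.h"
    #include "mongo/db/operation_context.h"

    mongo::Lock::ResourceMutex filesMutex("illustrativeFilesMutex");

    void readGuardedState(mongo::OperationContext* txn) {
        mongo::Lock::SharedLock lk(txn->lockState(), filesMutex);
        // Holding the shared lock satisfies the "at least read locked" check.
        invariant(filesMutex.isAtLeastReadLocked(txn->lockState()));
        // ... read the guarded state ...
    }
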
diff --git a/src/mongo/db/query/query_yield.cpp b/src/mongo/db/query/query_yield.cpp
index 893184fb14f..6548a662a17 100644
--- a/src/mongo/db/query/query_yield.cpp
+++ b/src/mongo/db/query/query_yield.cpp
@@ -60,7 +60,7 @@ void QueryYield::yieldAllLocks(OperationContext* txn,
Locker::LockSnapshot snapshot;
if (fetcher) {
- fetcher->setup();
+ fetcher->setup(txn);
}
// Nothing was unlocked, just return, yielding is pointless.
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1cb67f64a53..908febd4c76 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -534,7 +534,7 @@ void createOplog(OperationContext* txn, const std::string& oplogCollectionName,
/* sync here so we don't get any surprising lag later when we try to sync */
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->flushAllFiles(true);
+ storageEngine->flushAllFiles(txn, true);
log() << "******" << endl;
}
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index e6dd1f8cbbe..04c5acb1dfa 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -91,7 +91,7 @@ public:
virtual Status dropIdent(OperationContext* opCtx, StringData ident) = 0;
// optional
- virtual int flushAllFiles(bool sync) {
+ virtual int flushAllFiles(OperationContext* txn, bool sync) {
return 0;
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index f825cb3a1c2..ee02c447bdf 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -251,8 +251,8 @@ Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
return Status::OK();
}
-int KVStorageEngine::flushAllFiles(bool sync) {
- return _engine->flushAllFiles(sync);
+int KVStorageEngine::flushAllFiles(OperationContext* txn, bool sync) {
+ return _engine->flushAllFiles(txn, sync);
}
Status KVStorageEngine::beginBackup(OperationContext* txn) {
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index 32bab2ee252..ba656ae85c1 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -93,7 +93,7 @@ public:
virtual Status dropDatabase(OperationContext* txn, StringData db);
- virtual int flushAllFiles(bool sync);
+ virtual int flushAllFiles(OperationContext* txn, bool sync);
virtual Status beginBackup(OperationContext* txn);
diff --git a/src/mongo/db/storage/mmap_v1/SConscript b/src/mongo/db/storage/mmap_v1/SConscript
index 4fa273fe408..ba8fd96c097 100644
--- a/src/mongo/db/storage/mmap_v1/SConscript
+++ b/src/mongo/db/storage/mmap_v1/SConscript
@@ -115,9 +115,11 @@ env.Library(
'mmap_${TARGET_OS_FAMILY}.cpp',
],
LIBDEPS=[
- 'file_allocator',
+ '$BUILD_DIR/mongo/db/concurrency/lock_manager',
+ '$BUILD_DIR/mongo/db/service_context',
+ '$BUILD_DIR/mongo/db/storage/storage_options',
'$BUILD_DIR/mongo/util/progress_meter',
- '$BUILD_DIR/mongo/db/storage/storage_options'
+ 'file_allocator',
],
)
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
index ffca8474db3..173c2afceca 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
@@ -51,8 +51,10 @@ using std::endl;
using std::list;
using std::string;
-NamespaceIndex::NamespaceIndex(const std::string& dir, const std::string& database)
- : _dir(dir), _database(database), _ht(nullptr) {}
+NamespaceIndex::NamespaceIndex(OperationContext* txn,
+ const std::string& dir,
+ const std::string& database)
+ : _dir(dir), _database(database), _f(txn, MongoFile::Options::SEQUENTIAL), _ht(nullptr) {}
NamespaceIndex::~NamespaceIndex() {}
@@ -156,7 +158,7 @@ void NamespaceIndex::init(OperationContext* txn) {
void* p = 0;
if (boost::filesystem::exists(nsPath)) {
- if (_f.open(pathString)) {
+ if (_f.open(txn, pathString)) {
len = _f.length();
if (len % (1024 * 1024) != 0) {
@@ -215,7 +217,7 @@ void NamespaceIndex::init(OperationContext* txn) {
massert(18826, str::stream() << "failure writing file " << pathString, !file.bad());
}
- if (_f.create(pathString, l)) {
+ if (_f.create(txn, pathString, l)) {
// The writes done in this function must not be rolled back. This will leave the
// file empty, but available for future use. That is why we go directly to the
// global dur dirty list rather than going through the OperationContext.
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
index 8bcf836518c..51aae08ea61 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
@@ -51,9 +51,17 @@ class NamespaceIndex {
MONGO_DISALLOW_COPYING(NamespaceIndex);
public:
- NamespaceIndex(const std::string& dir, const std::string& database);
+ NamespaceIndex(OperationContext* txn, const std::string& dir, const std::string& database);
~NamespaceIndex();
+ /**
+ * Must be called before destruction.
+ */
+ void close(OperationContext* txn) {
+ LockMongoFilesExclusive lock(txn);
+ _f.close(txn);
+ }
+
/* returns true if the file represented by this file exists on disk */
bool pathExists() const;
@@ -86,7 +94,7 @@ private:
const std::string _dir;
const std::string _database;
- DurableMappedFile _f{MongoFile::Options::SEQUENTIAL};
+ DurableMappedFile _f;
std::unique_ptr<NamespaceHashTable> _ht;
};
}
diff --git a/src/mongo/db/storage/mmap_v1/data_file.cpp b/src/mongo/db/storage/mmap_v1/data_file.cpp
index c90ff1f74da..d81aa591817 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file.cpp
@@ -104,14 +104,14 @@ int DataFile::_defaultSize() const {
}
/** @return true if found and opened. if uninitialized (prealloc only) does not open. */
-Status DataFile::openExisting(const char* filename) {
+Status DataFile::openExisting(OperationContext* txn, const char* filename) {
invariant(_mb == 0);
if (!boost::filesystem::exists(filename)) {
return Status(ErrorCodes::InvalidPath, "DataFile::openExisting - file does not exist");
}
- if (!mmf.open(filename)) {
+ if (!mmf.open(txn, filename)) {
return Status(ErrorCodes::InternalError, "DataFile::openExisting - mmf.open failed");
}
@@ -170,7 +170,7 @@ void DataFile::open(OperationContext* txn,
{
invariant(_mb == 0);
unsigned long long sz = size;
- if (mmf.create(filename, sz)) {
+ if (mmf.create(txn, filename, sz)) {
_mb = mmf.getView();
}
diff --git a/src/mongo/db/storage/mmap_v1/data_file.h b/src/mongo/db/storage/mmap_v1/data_file.h
index e28a5ebcc6e..57b5fb223f9 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.h
+++ b/src/mongo/db/storage/mmap_v1/data_file.h
@@ -195,10 +195,10 @@ public:
class DataFile {
public:
- DataFile(int fn) : _fileNo(fn), _mb(NULL) {}
+ DataFile(OperationContext* txn, int fn) : _fileNo(fn), mmf(txn), _mb(NULL) {}
/** @return true if found and opened. if uninitialized (prealloc only) does not open. */
- Status openExisting(const char* filename);
+ Status openExisting(OperationContext* txn, const char* filename);
/** creates if DNE */
void open(OperationContext* txn,
@@ -206,6 +206,14 @@ public:
int requestedDataSize = 0,
bool preallocateOnly = false);
+ /**
+ * Must be called before destruction.
+ */
+ void close(OperationContext* txn) {
+ LockMongoFilesExclusive lock(txn);
+ mmf.close(txn);
+ }
+
DiskLoc allocExtentArea(OperationContext* txn, int size);
DataFileHeader* getHeader() {
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
index 24be15045f0..e1bc51d29f3 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/client.h"
#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/diag_log.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
@@ -80,11 +81,12 @@ void DataFileSync::run() {
break;
}
+ auto txn = cc().makeOperationContext();
Date_t start = jsTime();
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
dur::notifyPreDataFileFlush();
- int numFiles = storageEngine->flushAllFiles(true);
+ int numFiles = storageEngine->flushAllFiles(txn.get(), true);
dur::notifyPostDataFileFlush();
time_flushing = durationCount<Milliseconds>(jsTime() - start);
@@ -125,7 +127,7 @@ class MemJournalServerStatusMetric : public ServerStatusMetric {
public:
MemJournalServerStatusMetric() : ServerStatusMetric(".mem.mapped") {}
virtual void appendAtLeaf(BSONObjBuilder& b) const {
- int m = static_cast<int>(MemoryMappedFile::totalMappedLength() / (1024 * 1024));
+ int m = MemoryMappedFile::totalMappedLengthInMB();
b.appendNumber("mapped", m);
if (storageGlobalParams.dur) {
@@ -133,6 +135,5 @@ public:
b.appendNumber("mappedWithJournal", m);
}
}
-
} memJournalServerStatusMetric;
}
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index ab1d1988cba..1ed496c3a6d 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -204,7 +204,7 @@ public:
return false;
}
virtual void closingFileNotification() {}
- virtual void commitAndStopDurThread() {}
+ virtual void commitAndStopDurThread(OperationContext* txn) {}
};
@@ -226,7 +226,7 @@ public:
return true;
}
virtual void closingFileNotification();
- virtual void commitAndStopDurThread();
+ virtual void commitAndStopDurThread(OperationContext* txn);
void start(ClockSource* cs, int64_t serverStartMs);
@@ -318,7 +318,7 @@ void debugValidateFileMapsMatch(const DurableMappedFile* mmf) {
/**
* Main code of the remap private view function.
*/
-void remapPrivateViewImpl(double fraction) {
+void remapPrivateViewImpl(OperationContext* txn, double fraction) {
LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
// There is no way that the set of files can change while we are in this method, because
@@ -335,9 +335,9 @@ void remapPrivateViewImpl(double fraction) {
// See SERVER-5680 to see why this code is necessary on Windows.
// See SERVER-8795 to see why this code is necessary on Solaris.
#if defined(_WIN32) || defined(__sun)
- LockMongoFilesExclusive lk;
+ LockMongoFilesExclusive lk(txn);
#else
- LockMongoFilesShared lk;
+ LockMongoFilesShared lk(txn);
#endif
std::set<MongoFile*>& files = MongoFile::getAllFiles();
@@ -381,7 +381,7 @@ void remapPrivateViewImpl(double fraction) {
}
if (mmf->willNeedRemap()) {
- mmf->remapThePrivateView();
+ mmf->remapThePrivateView(txn);
}
i++;
@@ -570,7 +570,7 @@ void DurableImpl::syncDataAndTruncateJournal(OperationContext* txn) {
commitNow(txn);
// Flush the shared view to disk.
- MongoFile::flushAll(true);
+ MongoFile::flushAll(txn, true);
// Once the shared view has been flushed, we do not need the journal files anymore.
journalCleanup(true);
@@ -588,7 +588,7 @@ void DurableImpl::closingFileNotification() {
}
}
-void DurableImpl::commitAndStopDurThread() {
+void DurableImpl::commitAndStopDurThread(OperationContext* txn) {
CommitNotifier::When when = commitNotify.now();
// There is always just one waiting anyways
@@ -600,7 +600,7 @@ void DurableImpl::commitAndStopDurThread() {
applyToDataFilesNotify.waitFor(when);
// Flush the shared view to disk.
- MongoFile::flushAll(true);
+ MongoFile::flushAll(txn, true);
// Once the shared view has been flushed, we do not need the journal files anymore.
journalCleanup(true);
@@ -630,14 +630,14 @@ void DurableImpl::start(ClockSource* cs, int64_t serverStartMs) {
* @param fraction Value between (0, 1] indicating what fraction of the memory to remap.
* Remapping too much or too frequently incurs copy-on-write page fault cost.
*/
-static void remapPrivateView(double fraction) {
+static void remapPrivateView(OperationContext* txn, double fraction) {
// Remapping private views must occur after WRITETODATAFILES otherwise we wouldn't see any
// newly written data on reads.
invariant(!commitJob.hasWritten());
try {
Timer t;
- remapPrivateViewImpl(fraction);
+ remapPrivateViewImpl(txn, fraction);
stats.curr()->_remapPrivateViewMicros += t.micros();
LOG(4) << "remapPrivateView end";
@@ -828,7 +828,7 @@ static void durThread(ClockSource* cs, int64_t serverStartMs) {
// accessing it. Technically this step could be avoided on systems, which
// support atomic remap.
autoFlushLock.upgradeFlushLockToExclusive();
- remapPrivateView(remapFraction);
+ remapPrivateView(txnPtr.get(), remapFraction);
autoFlushLock.release();
diff --git a/src/mongo/db/storage/mmap_v1/dur.h b/src/mongo/db/storage/mmap_v1/dur.h
index cf5e4ec2b14..e4aec954749 100644
--- a/src/mongo/db/storage/mmap_v1/dur.h
+++ b/src/mongo/db/storage/mmap_v1/dur.h
@@ -112,7 +112,7 @@ public:
*
* Must be called under the global X lock.
*/
- virtual void commitAndStopDurThread() = 0;
+ virtual void commitAndStopDurThread(OperationContext* txn) = 0;
/**
* Commits pending changes, flushes all changes to main data files, then removes the
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
index 4eb0b4ea16b..5c9fe117d52 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
@@ -56,12 +56,14 @@ namespace {
* (2) TODO should we do this using N threads? Would be quite easy see Hackenberg paper table
* 5 and 6. 2 threads might be a good balance.
*/
-void WRITETODATAFILES(const JSectHeader& h, const AlignedBuilder& uncompressed) {
+void WRITETODATAFILES(OperationContext* txn,
+ const JSectHeader& h,
+ const AlignedBuilder& uncompressed) {
Timer t;
LOG(4) << "WRITETODATAFILES BEGIN";
- RecoveryJob::get().processSection(&h, uncompressed.buf(), uncompressed.len(), NULL);
+ RecoveryJob::get().processSection(txn, &h, uncompressed.buf(), uncompressed.len(), NULL);
const long long m = t.micros();
stats.curr()->_writeToDataFilesMicros += m;
@@ -244,7 +246,7 @@ void JournalWriter::_journalWriterThread() {
// Apply the journal entries on top of the shared view so that when flush is
// requested it would write the latest.
- WRITETODATAFILES(buffer->_header, buffer->_builder);
+ WRITETODATAFILES(cc().makeOperationContext().get(), buffer->_header, buffer->_builder);
// Data is now persisted on the shared view, so notify any potential journal file
// cleanup waiters.
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index ec6b945455e..cdd3d4e3db2 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -58,6 +58,7 @@
#include "mongo/util/hex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/scopeguard.h"
#include "mongo/util/startup_test.h"
namespace mongo {
@@ -264,22 +265,26 @@ RecoveryJob::RecoveryJob()
_appliedAnySections(false) {}
RecoveryJob::~RecoveryJob() {
- DESTRUCTOR_GUARD(if (!_mmfs.empty()) {} close();)
+ invariant(!"RecoveryJob is intentionally leaked with a bare call to operator new()");
}
-void RecoveryJob::close() {
+void RecoveryJob::close(OperationContext* txn) {
stdx::lock_guard<stdx::mutex> lk(_mx);
- _close();
+ _close(txn);
}
-void RecoveryJob::_close() {
- MongoFile::flushAll(true);
+void RecoveryJob::_close(OperationContext* txn) {
+ MongoFile::flushAll(txn, true);
+ LockMongoFilesExclusive lock(txn);
+ for (auto& durFile : _mmfs) {
+ durFile->close(txn);
+ }
_mmfs.clear();
}
-RecoveryJob::Last::Last() : mmf(NULL), fileNo(-1) {
+RecoveryJob::Last::Last(OperationContext* txn) : _txn(txn), mmf(NULL), fileNo(-1) {
// Make sure the files list does not change from underneath
- LockMongoFilesShared::assertAtLeastReadLocked();
+ LockMongoFilesShared::assertAtLeastReadLocked(txn);
}
DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& entry,
@@ -291,7 +296,7 @@ DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& en
string fn = fileName(entry.dbName, num);
MongoFile* file;
{
- MongoFileFinder finder; // must release lock before creating new DurableMappedFile
+ MongoFileFinder finder(_txn); // must release lock before creating new DurableMappedFile
file = finder.findByPath(fn);
}
@@ -303,8 +308,8 @@ DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& en
log() << "journal error applying writes, file " << fn << " is not open" << endl;
verify(false);
}
- std::shared_ptr<DurableMappedFile> sp(new DurableMappedFile);
- verify(sp->open(fn));
+ std::shared_ptr<DurableMappedFile> sp(new DurableMappedFile(_txn));
+ verify(sp->open(_txn, fn));
rj._mmfs.push_back(sp);
mmf = sp.get();
}
@@ -358,14 +363,14 @@ void RecoveryJob::applyEntry(Last& last, const ParsedJournalEntry& entry, bool a
}
if (apply) {
if (entry.op->needFilesClosed()) {
- _close(); // locked in processSection
+ _close(last.txn()); // locked in processSection
}
entry.op->replay();
}
}
}
-void RecoveryJob::applyEntries(const vector<ParsedJournalEntry>& entries) {
+void RecoveryJob::applyEntries(OperationContext* txn, const vector<ParsedJournalEntry>& entries) {
const bool apply = (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) == 0;
const bool dump = (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal);
@@ -373,7 +378,7 @@ void RecoveryJob::applyEntries(const vector<ParsedJournalEntry>& entries) {
log() << "BEGIN section" << endl;
}
- Last last;
+ Last last(txn);
for (vector<ParsedJournalEntry>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
applyEntry(last, *i, apply, dump);
}
@@ -383,11 +388,12 @@ void RecoveryJob::applyEntries(const vector<ParsedJournalEntry>& entries) {
}
}
-void RecoveryJob::processSection(const JSectHeader* h,
+void RecoveryJob::processSection(OperationContext* txn,
+ const JSectHeader* h,
const void* p,
unsigned len,
const JSectFooter* f) {
- LockMongoFilesShared lkFiles; // for RecoveryJob::Last
+ LockMongoFilesShared lkFiles(txn); // for RecoveryJob::Last
stdx::lock_guard<stdx::mutex> lk(_mx);
if (_recovering) {
@@ -461,14 +467,14 @@ void RecoveryJob::processSection(const JSectHeader* h,
}
// got all the entries for one group commit. apply them:
- applyEntries(entries);
+ applyEntries(txn, entries);
}
/** apply a specific journal file, that is already mmap'd
@param p start of the memory mapped file
@return true if this is detected to be the last file (ends abruptly)
*/
-bool RecoveryJob::processFileBuffer(const void* p, unsigned len) {
+bool RecoveryJob::processFileBuffer(OperationContext* txn, const void* p, unsigned len) {
try {
unsigned long long fileId;
BufReader br(p, len);
@@ -523,7 +529,7 @@ bool RecoveryJob::processFileBuffer(const void* p, unsigned len) {
const char* hdr = (const char*)br.skip(h.sectionLenWithPadding());
const char* data = hdr + sizeof(JSectHeader);
const char* footer = data + dataLen;
- processSection((const JSectHeader*)hdr, data, dataLen, (const JSectFooter*)footer);
+ processSection(txn, (const JSectHeader*)hdr, data, dataLen, (const JSectFooter*)footer);
// ctrl c check
uassert(ErrorCodes::Interrupted,
@@ -544,7 +550,7 @@ bool RecoveryJob::processFileBuffer(const void* p, unsigned len) {
}
/** apply a specific journal file */
-bool RecoveryJob::processFile(boost::filesystem::path journalfile) {
+bool RecoveryJob::processFile(OperationContext* txn, boost::filesystem::path journalfile) {
log() << "recover " << journalfile.string() << endl;
try {
@@ -558,16 +564,20 @@ bool RecoveryJob::processFile(boost::filesystem::path journalfile) {
log() << "recover exception checking filesize" << endl;
}
- MemoryMappedFile f{MongoFile::Options::READONLY | MongoFile::Options::SEQUENTIAL};
- void* p = f.map(journalfile.string().c_str());
+ MemoryMappedFile f{txn, MongoFile::Options::READONLY | MongoFile::Options::SEQUENTIAL};
+ ON_BLOCK_EXIT([&f, &txn] {
+ LockMongoFilesExclusive lock(txn);
+ f.close(txn);
+ });
+ void* p = f.map(txn, journalfile.string().c_str());
massert(13544, str::stream() << "recover error couldn't open " << journalfile.string(), p);
- return processFileBuffer(p, (unsigned)f.length());
+ return processFileBuffer(txn, p, (unsigned)f.length());
}
/** @param files all the j._0 style files we need to apply for recovery */
-void RecoveryJob::go(vector<boost::filesystem::path>& files) {
+void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& files) {
log() << "recover begin" << endl;
- LockMongoFilesExclusive lkFiles; // for RecoveryJob::Last
+ LockMongoFilesExclusive lkFiles(txn); // for RecoveryJob::Last
_recovering = true;
// load the last sequence number synced to the datafiles on disk before the last crash
@@ -575,11 +585,11 @@ void RecoveryJob::go(vector<boost::filesystem::path>& files) {
log() << "recover lsn: " << _lastDataSyncedFromLastRun << endl;
for (unsigned i = 0; i != files.size(); ++i) {
- bool abruptEnd = processFile(files[i]);
+ bool abruptEnd = processFile(txn, files[i]);
if (abruptEnd && i + 1 < files.size()) {
log() << "recover error: abrupt end to file " << files[i].string()
<< ", yet it isn't the last journal file" << endl;
- close();
+ close(txn);
uasserted(13535, "recover abrupt journal file end");
}
}
@@ -590,7 +600,7 @@ void RecoveryJob::go(vector<boost::filesystem::path>& files) {
<< "Last skipped sections had sequence number " << _lastSeqSkipped;
}
- close();
+ close(txn);
if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) {
uasserted(13545,
@@ -605,7 +615,7 @@ void RecoveryJob::go(vector<boost::filesystem::path>& files) {
_recovering = false;
}
-void _recover() {
+void _recover(OperationContext* txn) {
verify(storageGlobalParams.dur);
boost::filesystem::path p = getJournalDir();
@@ -625,7 +635,7 @@ void _recover() {
return;
}
- RecoveryJob::get().go(journalFiles);
+ RecoveryJob::get().go(txn, journalFiles);
}
/** recover from a crash
@@ -635,18 +645,18 @@ void _recover() {
void replayJournalFilesAtStartup() {
// we use a lock so that exitCleanly will wait for us
// to finish (or at least to notice what is up and stop)
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite lk(txn.lockState());
+ auto txn = cc().makeOperationContext();
+ ScopedTransaction transaction(txn.get(), MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
- _recover(); // throws on interruption
+ _recover(txn.get()); // throws on interruption
}
struct BufReaderY {
int a, b;
};
class BufReaderUnitTest : public StartupTest {
+
public:
void run() {
BufReader r((void*)"abcdabcdabcd", 12);
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.h b/src/mongo/db/storage/mmap_v1/dur_recover.h
index b9b7c5ddfdb..9447044b607 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.h
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.h
@@ -34,6 +34,7 @@
#include <list>
#include <memory>
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/dur_journalformat.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/mutex.h"
@@ -55,13 +56,17 @@ public:
RecoveryJob();
~RecoveryJob();
- void go(std::vector<boost::filesystem::path>& files);
+ void go(OperationContext* txn, std::vector<boost::filesystem::path>& files);
/** @param data data between header and footer. compressed if recovering. */
- void processSection(const JSectHeader* h, const void* data, unsigned len, const JSectFooter* f);
+ void processSection(OperationContext* txn,
+ const JSectHeader* h,
+ const void* data,
+ unsigned len,
+ const JSectFooter* f);
// locks and calls _close()
- void close();
+ void close(OperationContext* txn);
static RecoveryJob& get() {
return _instance;
@@ -70,10 +75,16 @@ public:
private:
class Last {
public:
- Last();
+ Last(OperationContext* txn);
+
DurableMappedFile* newEntry(const ParsedJournalEntry&, RecoveryJob&);
+ OperationContext* txn() {
+ return _txn;
+ }
+
private:
+ OperationContext* _txn;
DurableMappedFile* mmf;
std::string dbName;
int fileNo;
@@ -82,11 +93,10 @@ private:
void write(Last& last, const ParsedJournalEntry& entry); // actually writes to the file
void applyEntry(Last& last, const ParsedJournalEntry& entry, bool apply, bool dump);
- void applyEntries(const std::vector<ParsedJournalEntry>& entries);
- bool processFileBuffer(const void*, unsigned len);
- bool processFile(boost::filesystem::path journalfile);
- void _close(); // doesn't lock
-
+ void applyEntries(OperationContext* txn, const std::vector<ParsedJournalEntry>& entries);
+ bool processFileBuffer(OperationContext* txn, const void*, unsigned len);
+ bool processFile(OperationContext* txn, boost::filesystem::path journalfile);
+ void _close(OperationContext* txn); // doesn't lock
// Set of memory mapped files and a mutex to protect them
stdx::mutex _mx;
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index a7e8e2e429f..548cb8c9f05 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -61,7 +61,7 @@ using std::map;
using std::pair;
using std::string;
-void DurableMappedFile::remapThePrivateView() {
+void DurableMappedFile::remapThePrivateView(OperationContext* txn) {
verify(storageGlobalParams.dur);
_willNeedRemap = false;
@@ -70,7 +70,7 @@ void DurableMappedFile::remapThePrivateView() {
// so the remove / add isn't necessary and can be removed?
void* old = _view_private;
// privateViews.remove(_view_private);
- _view_private = remapPrivateView(_view_private);
+ _view_private = remapPrivateView(txn, _view_private);
// privateViews.add(_view_private, this);
fassert(16112, _view_private == old);
}
@@ -241,22 +241,24 @@ void DurableMappedFile::setPath(const std::string& f) {
_p = RelativePath::fromFullPath(storageGlobalParams.dbpath, prefix);
}
-bool DurableMappedFile::open(const std::string& fname) {
+bool DurableMappedFile::open(OperationContext* txn, const std::string& fname) {
LOG(3) << "mmf open " << fname;
invariant(!_view_write);
setPath(fname);
- _view_write = map(fname.c_str());
+ _view_write = map(txn, fname.c_str());
fassert(16333, _view_write);
return finishOpening();
}
-bool DurableMappedFile::create(const std::string& fname, unsigned long long& len) {
+bool DurableMappedFile::create(OperationContext* txn,
+ const std::string& fname,
+ unsigned long long& len) {
LOG(3) << "mmf create " << fname;
invariant(!_view_write);
setPath(fname);
- _view_write = map(fname.c_str(), len);
+ _view_write = map(txn, fname.c_str(), len);
fassert(16332, _view_write);
return finishOpening();
}
@@ -283,12 +285,7 @@ bool DurableMappedFile::finishOpening() {
return false;
}
-DurableMappedFile::DurableMappedFile(OptionSet options)
- : MemoryMappedFile(options), _willNeedRemap(false) {
- _view_write = _view_private = 0;
-}
-
-DurableMappedFile::~DurableMappedFile() {
+void DurableMappedFile::close(OperationContext* txn) {
try {
LOG(3) << "mmf close " << filename();
@@ -299,12 +296,20 @@ DurableMappedFile::~DurableMappedFile() {
getDur().closingFileNotification();
}
- LockMongoFilesExclusive lk;
privateViews.remove(_view_private, length());
- MemoryMappedFile::close();
+ MemoryMappedFile::close(txn);
} catch (...) {
- error() << "exception in ~DurableMappedFile";
+ error() << "exception in DurableMappedFile::close";
}
}
+
+DurableMappedFile::DurableMappedFile(OperationContext* txn, OptionSet options)
+ : MemoryMappedFile(txn, options), _willNeedRemap(false) {
+ _view_write = _view_private = 0;
+}
+
+DurableMappedFile::~DurableMappedFile() {
+ invariant(isClosed());
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
index 3175fa8fa73..7c6e99bfc7e 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
@@ -32,6 +32,7 @@
#pragma once
#include "mongo/base/static_assert.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/paths.h"
#include "mongo/stdx/mutex.h"
@@ -50,11 +51,16 @@ protected:
}
public:
- DurableMappedFile(OptionSet options = NONE);
+ explicit DurableMappedFile(OperationContext* txn, OptionSet options = NONE);
virtual ~DurableMappedFile();
+ /**
+ * Callers must be holding a `LockMongoFilesExclusive`.
+ */
+ virtual void close(OperationContext* txn);
+
/** @return true if opened ok. */
- bool open(const std::string& fname);
+ bool open(OperationContext* txn, const std::string& fname);
/** @return file length */
unsigned long long length() const {
@@ -73,7 +79,7 @@ public:
passed length.
@return true for ok
*/
- bool create(const std::string& fname, unsigned long long& len);
+ bool create(OperationContext* txn, const std::string& fname, unsigned long long& len);
/* Get the "standard" view (which is the private one).
@return the private view.
@@ -117,7 +123,7 @@ public:
_willNeedRemap = true;
}
- void remapThePrivateView();
+ void remapThePrivateView(OperationContext* txn);
virtual bool isDurableMappedFile() {
return true;
diff --git a/src/mongo/db/storage/mmap_v1/extent_manager.h b/src/mongo/db/storage/mmap_v1/extent_manager.h
index 4ce623ffc8a..1ca0ab7b9f1 100644
--- a/src/mongo/db/storage/mmap_v1/extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/extent_manager.h
@@ -77,6 +77,8 @@ public:
virtual ~ExtentManager() {}
+ virtual void close(OperationContext* txn) = 0;
+
/**
* opens all current files
*/
diff --git a/src/mongo/db/storage/mmap_v1/mmap.cpp b/src/mongo/db/storage/mmap_v1/mmap.cpp
index c8eaacd2f63..bdce2bd6468 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap.cpp
@@ -36,8 +36,10 @@
#include <boost/filesystem/operations.hpp>
#include "mongo/base/owned_pointer_vector.h"
+#include "mongo/db/client.h"
+#include "mongo/db/concurrency/locker.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/util/concurrency/rwlock.h"
#include "mongo/util/log.h"
#include "mongo/util/map_util.h"
#include "mongo/util/mongoutils/str.h"
@@ -67,16 +69,33 @@ map<string, MongoFile*> pathToFile;
mongo::AtomicUInt64 mmfNextId(0);
} // namespace
-MemoryMappedFile::MemoryMappedFile(OptionSet options)
+MemoryMappedFile::MemoryMappedFile(OperationContext* txn, OptionSet options)
: MongoFile(options), _uniqueId(mmfNextId.fetchAndAdd(1)) {
- created();
+ created(txn);
}
-void* MemoryMappedFile::create(const std::string& filename, unsigned long long len, bool zero) {
+MemoryMappedFile::~MemoryMappedFile() {
+ invariant(isClosed());
+
+ auto txn = cc().getOperationContext();
+ invariant(txn);
+
+ LockMongoFilesShared lock(txn);
+ for (std::set<MongoFile*>::const_iterator it = mmfiles.begin(); it != mmfiles.end(); it++) {
+ invariant(*it != this);
+ }
+}
+
+/*static*/ AtomicUInt64 MemoryMappedFile::totalMappedLength;
+
+void* MemoryMappedFile::create(OperationContext* txn,
+ const std::string& filename,
+ unsigned long long len,
+ bool zero) {
uassert(13468,
string("can't create file already exists ") + filename,
!boost::filesystem::exists(filename));
- void* p = map(filename.c_str(), len);
+ void* p = map(txn, filename.c_str(), len);
fassert(16331, p);
if (zero) {
size_t sz = (size_t)len;
@@ -94,7 +113,7 @@ void* MemoryMappedFile::create(const std::string& filename, unsigned long long l
length = l;
}
-void* MemoryMappedFile::map(const char* filename) {
+void* MemoryMappedFile::map(OperationContext* txn, const char* filename) {
unsigned long long l;
try {
l = boost::filesystem::file_size(filename);
@@ -106,7 +125,7 @@ void* MemoryMappedFile::map(const char* filename) {
<< e.what());
}
- void* ret = map(filename, l);
+ void* ret = map(txn, filename, l);
fassert(16334, ret);
return ret;
}
@@ -119,7 +138,7 @@ MongoFile::MongoFile(OptionSet options)
: _options(storageGlobalParams.readOnly ? (options | READONLY) : options) {}
-RWLockRecursiveNongreedy LockMongoFilesShared::mmmutex("mmmutex", 10 * 60 * 1000 /* 10 minutes */);
+Lock::ResourceMutex LockMongoFilesShared::mmmutex("MMapMutex");
unsigned LockMongoFilesShared::era = 99; // note this rolls over
set<MongoFile*>& MongoFile::getAllFiles() {
@@ -131,14 +150,14 @@ set<MongoFile*>& MongoFile::getAllFiles() {
safe to call more than once, albeit might be wasted work
ideal to call close to the close, if the close is well before object destruction
*/
-void MongoFile::destroyed() {
- LockMongoFilesShared::assertExclusivelyLocked();
+void MongoFile::destroyed(OperationContext* txn) {
+ LockMongoFilesShared::assertExclusivelyLocked(txn);
mmfiles.erase(this);
pathToFile.erase(filename());
}
/*static*/
-void MongoFile::closeAllFiles(stringstream& message) {
+void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
static int closingAllFiles = 0;
if (closingAllFiles) {
message << "warning closingAllFiles=" << closingAllFiles << endl;
@@ -146,37 +165,26 @@ void MongoFile::closeAllFiles(stringstream& message) {
}
++closingAllFiles;
- LockMongoFilesExclusive lk;
+ LockMongoFilesExclusive lk(txn);
ProgressMeter pm(mmfiles.size(), 2, 1, "files", "File Closing Progress");
set<MongoFile*> temp = mmfiles;
for (set<MongoFile*>::iterator i = temp.begin(); i != temp.end(); i++) {
- (*i)->close(); // close() now removes from mmfiles
+ (*i)->close(txn); // close() now removes from mmfiles
pm.hit();
}
message << "closeAllFiles() finished";
--closingAllFiles;
}
-/*static*/ long long MongoFile::totalMappedLength() {
- unsigned long long total = 0;
-
- LockMongoFilesShared lk;
-
- for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++)
- total += (*i)->length();
-
- return total;
-}
-
-/*static*/ int MongoFile::flushAll(bool sync) {
- return _flushAll(sync);
+/*static*/ int MongoFile::flushAll(OperationContext* txn, bool sync) {
+ return _flushAll(txn, sync);
}
-/*static*/ int MongoFile::_flushAll(bool sync) {
+/*static*/ int MongoFile::_flushAll(OperationContext* txn, bool sync) {
if (!sync) {
int num = 0;
- LockMongoFilesShared lk;
+ LockMongoFilesShared lk(txn);
for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
num++;
MongoFile* mmf = *i;
@@ -196,7 +204,7 @@ void MongoFile::closeAllFiles(stringstream& message) {
OwnedPointerVector<Flushable> thingsToFlushWrapper;
vector<Flushable*>& thingsToFlush = thingsToFlushWrapper.mutableVector();
{
- LockMongoFilesShared lk;
+ LockMongoFilesShared lk(txn);
for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
MongoFile* mmf = *i;
if (!mmf)
@@ -206,22 +214,22 @@ void MongoFile::closeAllFiles(stringstream& message) {
}
for (size_t i = 0; i < thingsToFlush.size(); i++) {
- thingsToFlush[i]->flush();
+ thingsToFlush[i]->flush(txn);
}
return thingsToFlush.size();
}
-void MongoFile::created() {
+void MongoFile::created(OperationContext* txn) {
// If we're a READONLY mapping, we don't want to ever flush.
if (!isOptionSet(READONLY)) {
- LockMongoFilesExclusive lk;
+ LockMongoFilesExclusive lk(txn);
mmfiles.insert(this);
}
}
-void MongoFile::setFilename(const std::string& fn) {
- LockMongoFilesExclusive lk;
+void MongoFile::setFilename(OperationContext* txn, const std::string& fn) {
+ LockMongoFilesExclusive lk(txn);
verify(_filename.empty());
_filename = boost::filesystem::absolute(fn).generic_string();
MongoFile*& ptf = pathToFile[_filename];
@@ -235,23 +243,6 @@ MongoFile* MongoFileFinder::findByPath(const std::string& path) const {
static_cast<MongoFile*>(NULL));
}
-
-void printMemInfo(const char* where) {
- LogstreamBuilder out = log();
- out << "mem info: ";
- if (where)
- out << where << " ";
-
- ProcessInfo pi;
- if (!pi.supported()) {
- out << " not supported";
- return;
- }
-
- out << "vsize: " << pi.getVirtualMemorySize() << " resident: " << pi.getResidentSize()
- << " mapped: " << (MemoryMappedFile::totalMappedLength() / (1024 * 1024));
-}
-
void dataSyncFailedHandler() {
log() << "error syncing data to disk, probably a disk error";
log() << " shutting down immediately to avoid corruption";
diff --git a/src/mongo/db/storage/mmap_v1/mmap.h b/src/mongo/db/storage/mmap_v1/mmap.h
index af559c7db63..fc28d56e1d9 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.h
+++ b/src/mongo/db/storage/mmap_v1/mmap.h
@@ -33,7 +33,10 @@
#include <sstream>
#include <vector>
-#include "mongo/util/concurrency/rwlock.h"
+#include "mongo/base/disallow_copying.h"
+#include "mongo/db/client.h"
+#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/operation_context.h"
namespace mongo {
@@ -62,12 +65,24 @@ private:
// lock order: lock dbMutex before this if you lock both
class LockMongoFilesShared {
friend class LockMongoFilesExclusive;
- static RWLockRecursiveNongreedy mmmutex;
+ static Lock::ResourceMutex mmmutex;
static unsigned era;
- RWLockRecursive::Shared lk;
+
+ Lock::SharedLock lk;
public:
- LockMongoFilesShared() : lk(mmmutex) {}
+ explicit LockMongoFilesShared(OperationContext* txn) : lk(txn->lockState(), mmmutex) {
+ // JS worker threads may not have cc() setup, as they work on behalf of other clients
+ dassert(txn == cc().getOperationContext() || !cc().getOperationContext());
+ }
+
+ static void assertExclusivelyLocked(OperationContext* txn) {
+ invariant(mmmutex.isExclusivelyLocked(txn->lockState()));
+ }
+
+ static void assertAtLeastReadLocked(OperationContext* txn) {
+ invariant(mmmutex.isAtLeastReadLocked(txn->lockState()));
+ }
/** era changes anytime memory maps come and go. thus you can use this as a cheap way to check
if nothing has changed since the last time you locked. Of course you must be shared locked
@@ -78,20 +93,16 @@ public:
static unsigned getEra() {
return era;
}
-
- static void assertExclusivelyLocked() {
- mmmutex.assertExclusivelyLocked();
- }
- static void assertAtLeastReadLocked() {
- mmmutex.assertAtLeastReadLocked();
- }
};
class LockMongoFilesExclusive {
- RWLockRecursive::Exclusive lk;
+ Lock::ExclusiveLock lk;
public:
- LockMongoFilesExclusive() : lk(LockMongoFilesShared::mmmutex) {
+ explicit LockMongoFilesExclusive(OperationContext* txn)
+ : lk(txn->lockState(), LockMongoFilesShared::mmmutex) {
+ // JS worker threads may not have cc() setup, as they work on behalf of other clients
+ dassert(txn == cc().getOperationContext() || !cc().getOperationContext());
LockMongoFilesShared::era++;
}
};
@@ -105,7 +116,7 @@ public:
class Flushable {
public:
virtual ~Flushable() {}
- virtual void flush() = 0;
+ virtual void flush(OperationContext* txn) = 0;
};
enum Options {
@@ -124,7 +135,7 @@ public:
called from within a mutex that MongoFile uses. so be careful not to deadlock.
*/
template <class F>
- static void forEach(F fun);
+ static void forEach(OperationContext* txn, F fun);
/**
* note: you need to be in mmmutex when using this. forEach (above) handles that for you
@@ -132,9 +143,8 @@ public:
*/
static std::set<MongoFile*>& getAllFiles();
- static int flushAll(bool sync); // returns n flushed
- static long long totalMappedLength();
- static void closeAllFiles(std::stringstream& message);
+ static int flushAll(OperationContext* txn, bool sync); // returns n flushed
+ static void closeAllFiles(OperationContext* txn, std::stringstream& message);
virtual bool isDurableMappedFile() {
return false;
@@ -143,17 +153,20 @@ public:
std::string filename() const {
return _filename;
}
- void setFilename(const std::string& fn);
+ void setFilename(OperationContext* txn, const std::string& fn);
virtual uint64_t getUniqueId() const = 0;
private:
std::string _filename;
- static int _flushAll(bool sync); // returns n flushed
+ static int _flushAll(OperationContext* txn, bool sync); // returns n flushed
const OptionSet _options;
protected:
- virtual void close() = 0;
+ /**
+ * Implementations may assume this is called from within `LockMongoFilesExclusive`.
+ */
+ virtual void close(OperationContext* txn) = 0;
virtual void flush(bool sync) = 0;
/**
* returns a thread safe object that you can call flush on
@@ -161,14 +174,22 @@ protected:
*/
virtual Flushable* prepareFlush() = 0;
- void created(); /* subclass must call after create */
+ /**
+ * Returns true iff the file is closed.
+ */
+ virtual bool isClosed() = 0;
+
+ void created(OperationContext* txn); /* subclass must call after create */
- /* subclass must call in destructor (or at close).
- removes this from pathToFile and other maps
- safe to call more than once, albeit might be wasted work
- ideal to call close to the close, if the close is well before object destruction
- */
- void destroyed();
+ /**
+ * Implementations may assume this is called from within `LockMongoFilesExclusive`.
+ *
+ * subclass must call in destructor (or at close).
+ * removes this from pathToFile and other maps
+ * safe to call more than once, albeit might be wasted work
+ * ideal to call close to the close, if the close is well before object destruction
+ */
+ void destroyed(OperationContext* txn);
virtual unsigned long long length() const = 0;
@@ -187,7 +208,7 @@ class MongoFileFinder {
MONGO_DISALLOW_COPYING(MongoFileFinder);
public:
- MongoFileFinder() {}
+ MongoFileFinder(OperationContext* txn) : _lk(txn) {}
/** @return The MongoFile object associated with the specified file name. If no file is open
with the specified name, returns null.
@@ -208,27 +229,33 @@ protected:
}
public:
- MemoryMappedFile(OptionSet options = NONE);
+ MemoryMappedFile(OperationContext* txn, OptionSet options = NONE);
- virtual ~MemoryMappedFile() {
- LockMongoFilesExclusive lk;
- close();
- }
+ virtual ~MemoryMappedFile();
- virtual void close();
+ /**
+ * Callers must be holding a `LockMongoFilesExclusive`.
+ */
+ virtual void close(OperationContext* txn);
/**
* uasserts if file doesn't exist. fasserts on mmap error.
*/
- void* map(const char* filename);
+ void* map(OperationContext* txn, const char* filename);
/**
* uasserts if file exists. fasserts on mmap error.
* @param zero fill file with zeros when true
*/
- void* create(const std::string& filename, unsigned long long len, bool zero);
+ void* create(OperationContext* txn,
+ const std::string& filename,
+ unsigned long long len,
+ bool zero);
void flush(bool sync);
+
+ virtual bool isClosed();
+
virtual Flushable* prepareFlush();
long shortLength() const {
@@ -251,6 +278,10 @@ public:
return _uniqueId;
}
+ static int totalMappedLengthInMB() {
+ return static_cast<int>(totalMappedLength.load() / 1024 / 1024);
+ }
+
private:
static void updateLength(const char* filename, unsigned long long& length);
@@ -258,6 +289,7 @@ private:
HANDLE maphandle = 0;
std::vector<void*> views;
unsigned long long len = 0u;
+ static AtomicUInt64 totalMappedLength;
const uint64_t _uniqueId;
#ifdef _WIN32
// flush Mutex
@@ -275,18 +307,18 @@ protected:
* Creates with length if DNE, otherwise validates input length. Returns nullptr on mmap
* error.
*/
- void* map(const char* filename, unsigned long long& length);
+ void* map(OperationContext* txn, const char* filename, unsigned long long& length);
/**
* Close the current private view and open a new replacement. Returns nullptr on mmap error.
*/
- void* remapPrivateView(void* oldPrivateAddr);
+ void* remapPrivateView(OperationContext* txn, void* oldPrivateAddr);
};
/** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */
template <class F>
-inline void MongoFile::forEach(F p) {
- LockMongoFilesShared lklk;
+inline void MongoFile::forEach(OperationContext* txn, F p) {
+ LockMongoFilesShared lklk(txn);
const std::set<MongoFile*>& mmfiles = MongoFile::getAllFiles();
for (std::set<MongoFile*>::const_iterator i = mmfiles.begin(); i != mmfiles.end(); i++)
p(*i);
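
With the rwlock-based mmmutex replaced by a Lock::ResourceMutex, every helper that touches the global file set now threads an OperationContext down to the lock. A small sketch of the updated forEach() call shape (the counting function is hypothetical, not from the commit):

    #include "mongo/db/operation_context.h"
    #include "mongo/db/storage/mmap_v1/mmap.h"

    int countMappedFiles(mongo::OperationContext* txn) {
        int n = 0;
        // forEach() acquires LockMongoFilesShared(txn) internally before iterating.
        mongo::MongoFile::forEach(txn, [&n](mongo::MongoFile*) { ++n; });
        return n;
    }
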
diff --git a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
index e63e74923a7..02589421b44 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
@@ -54,6 +54,23 @@ using namespace mongoutils;
namespace mongo {
+
+namespace {
+void printMemInfo() {
+ LogstreamBuilder out = log();
+ out << "mem info: ";
+
+ ProcessInfo pi;
+ if (!pi.supported()) {
+ out << " not supported";
+ return;
+ }
+
+ out << "vsize: " << pi.getVirtualMemorySize() << " resident: " << pi.getResidentSize()
+ << " mapped: " << MemoryMappedFile::totalMappedLengthInMB();
+}
+} // namespace
+
static size_t fetchMinOSPageSizeBytes() {
size_t minOSPageSizeBytes = sysconf(_SC_PAGESIZE);
minOSPageSizeBytesTest(minOSPageSizeBytes);
@@ -62,17 +79,19 @@ static size_t fetchMinOSPageSizeBytes() {
const size_t g_minOSPageSizeBytes = fetchMinOSPageSizeBytes();
-void MemoryMappedFile::close() {
- LockMongoFilesShared::assertExclusivelyLocked();
+void MemoryMappedFile::close(OperationContext* txn) {
for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
munmap(*i, len);
}
views.clear();
+ totalMappedLength.fetchAndSubtract(len);
+ len = 0;
- if (fd)
+ if (fd) {
::close(fd);
- fd = 0;
- destroyed(); // cleans up from the master list of mmaps
+ fd = 0;
+ }
+ destroyed(txn); // cleans up from the master list of mmaps
}
#ifndef O_NOATIME
@@ -140,11 +159,12 @@ MAdvise::~MAdvise() {
}
#endif
-void* MemoryMappedFile::map(const char* filename, unsigned long long& length) {
+void* MemoryMappedFile::map(OperationContext* txn,
+ const char* filename,
+ unsigned long long& length) {
// length may be updated by callee.
- setFilename(filename);
+ setFilename(txn, filename);
FileAllocator::get()->allocateAsap(filename, length);
- len = length;
const bool readOnly = isOptionSet(READONLY);
@@ -193,6 +213,10 @@ void* MemoryMappedFile::map(const char* filename, unsigned long long& length) {
}
#endif
+ // MemoryMappedFile successfully created, now update state.
+ len = length;
+ MemoryMappedFile::totalMappedLength.fetchAndAdd(len);
+
views.push_back(view);
return view;
@@ -219,9 +243,9 @@ void* MemoryMappedFile::createPrivateMap() {
return x;
}
-void* MemoryMappedFile::remapPrivateView(void* oldPrivateAddr) {
+void* MemoryMappedFile::remapPrivateView(OperationContext* txn, void* oldPrivateAddr) {
#if defined(__sun) // SERVER-8795
- LockMongoFilesExclusive lockMongoFiles;
+ LockMongoFilesExclusive lockMongoFiles(txn);
#endif
// don't unmap, just mmap over the old region
@@ -255,12 +279,16 @@ void MemoryMappedFile::flush(bool sync) {
}
}
+bool MemoryMappedFile::isClosed() {
+ return !len && !fd && !views.size();
+}
+
class PosixFlushable : public MemoryMappedFile::Flushable {
public:
PosixFlushable(MemoryMappedFile* theFile, void* view, HANDLE fd, long len)
: _theFile(theFile), _view(view), _fd(fd), _len(len), _id(_theFile->getUniqueId()) {}
- void flush() {
+ void flush(OperationContext* txn) {
if (_view == NULL || _fd == 0)
return;
@@ -275,7 +303,7 @@ public:
}
// some error, lets see if we're supposed to exist
- LockMongoFilesShared mmfilesLock;
+ LockMongoFilesShared mmfilesLock(txn);
std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
if ((it == mmfs.end()) || ((*it)->getUniqueId() != _id)) {
@@ -301,5 +329,4 @@ MemoryMappedFile::Flushable* MemoryMappedFile::prepareFlush() {
return new PosixFlushable(this, viewForFlushing(), fd, len);
}
-
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index f27dcc935e8..69978fb4b53 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -56,6 +56,7 @@
#include "mongo/db/storage/mmap_v1/record_store_v1_simple.h"
#include "mongo/db/storage/record_data.h"
#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -162,8 +163,12 @@ MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(OperationContext* txn,
std::unique_ptr<ExtentManager> extentManager)
: DatabaseCatalogEntry(name),
_path(path.toString()),
- _namespaceIndex(_path, name.toString()),
+ _namespaceIndex(txn, _path, name.toString()),
_extentManager(std::move(extentManager)) {
+ ScopeGuard onErrorClose = MakeGuard([&] {
+ _namespaceIndex.close(txn);
+ _extentManager->close(txn);
+ });
massert(34469,
str::stream() << name << " is not a valid database name",
NamespaceString::validDBName(name));
@@ -193,6 +198,8 @@ MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(OperationContext* txn,
warning() << "database " << path << " " << name << " could not be opened " << e.what();
throw;
}
+
+ onErrorClose.Dismiss();
}
MMAPV1DatabaseCatalogEntry::~MMAPV1DatabaseCatalogEntry() {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
index 5934ded8a11..ea4342bb868 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
@@ -62,6 +62,14 @@ public:
virtual ~MMAPV1DatabaseCatalogEntry();
+ /**
+ * Must be called before destruction.
+ */
+ virtual void close(OperationContext* txn) {
+ _extentManager->close(txn);
+ _namespaceIndex.close(txn);
+ }
+
// these two seem the same and yet different
// TODO(ERH): consolidate into one ideally
virtual bool exists() const {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index e185afe03c7..36af8f3f06a 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -36,7 +36,9 @@
#include <boost/filesystem/path.hpp>
#include <fstream>
+#include "mongo/db/client.h"
#include "mongo/db/mongod_options.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/data_file_sync.h"
#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/dur_journal.h"
@@ -311,6 +313,9 @@ Status MMAPV1Engine::closeDatabase(OperationContext* txn, StringData db) {
stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
MMAPV1DatabaseCatalogEntry* entry = _entryMap[db.toString()];
+ if (entry) {
+ entry->close(txn);
+ }
delete entry;
_entryMap.erase(db.toString());
return Status::OK();
@@ -345,8 +350,8 @@ void MMAPV1Engine::_listDatabases(const std::string& directory, std::vector<std:
}
}
-int MMAPV1Engine::flushAllFiles(bool sync) {
- return MongoFile::flushAll(sync);
+int MMAPV1Engine::flushAllFiles(OperationContext* txn, bool sync) {
+ return MongoFile::flushAll(txn, sync);
}
Status MMAPV1Engine::beginBackup(OperationContext* txn) {
@@ -374,21 +379,32 @@ void MMAPV1Engine::cleanShutdown() {
// we would only hang here if the file_allocator code generates a
// synchronous signal, which we don't expect
log() << "shutdown: waiting for fs preallocator..." << endl;
+ auto txn = cc().getOperationContext();
+
+ // In some cases we may shutdown early before we have any operation context yet, but we need
+ // one for synchronization purposes.
+ ServiceContext::UniqueOperationContext newTxn;
+ if (!txn) {
+ newTxn = cc().makeOperationContext();
+ txn = newTxn.get();
+ invariant(txn);
+ }
+
FileAllocator::get()->waitUntilFinished();
if (storageGlobalParams.dur) {
log() << "shutdown: final commit..." << endl;
- getDur().commitAndStopDurThread();
+ getDur().commitAndStopDurThread(txn);
}
log() << "shutdown: closing all files..." << endl;
stringstream ss3;
- MemoryMappedFile::closeAllFiles(ss3);
+ MemoryMappedFile::closeAllFiles(txn, ss3);
log() << ss3.str() << endl;
}
void MMAPV1Engine::setJournalListener(JournalListener* jl) {
dur::setJournalListener(jl);
}
-}
+} // namespace
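cleanShutdown() now needs an OperationContext to hand to commitAndStopDurThread() and closeAllFiles(); it borrows the client's current one and only creates a temporary if shutdown runs before any exists. A borrow-or-create sketch of that pattern (illustrative standard C++, not the Client/ServiceContext API):

    #include <iostream>
    #include <memory>

    struct Context {
        int id;
    };

    Context* g_current = nullptr;  // stand-in for the client's ambient context

    Context* currentContext() {
        return g_current;
    }

    std::unique_ptr<Context> makeContext() {
        return std::make_unique<Context>(Context{42});
    }

    void shutdownWork(Context* ctx) {
        std::cout << "shutting down with context " << ctx->id << "\n";
    }

    void cleanShutdownLike() {
        Context* ctx = currentContext();
        std::unique_ptr<Context> owned;   // keeps a temporary context alive if needed
        if (!ctx) {
            owned = makeContext();        // no ambient context yet: make one
            ctx = owned.get();
        }
        shutdownWork(ctx);                // every shutdown step shares one context
    }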
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
index 54e8594e053..b5d19950d7b 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
@@ -57,7 +57,7 @@ public:
RecoveryUnit* newRecoveryUnit();
void listDatabases(std::vector<std::string>* out) const;
- int flushAllFiles(bool sync);
+ int flushAllFiles(OperationContext* txn, bool sync);
Status beginBackup(OperationContext* txn);
void endBackup(OperationContext* txn);
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 761fecfb075..3f9b6019802 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -79,9 +79,9 @@ class MmapV1RecordFetcher : public RecordFetcher {
public:
explicit MmapV1RecordFetcher(const MmapV1RecordHeader* record) : _record(record) {}
- virtual void setup() {
+ virtual void setup(OperationContext* txn) {
invariant(!_filesLock.get());
- _filesLock.reset(new LockMongoFilesShared());
+ _filesLock.reset(new LockMongoFilesShared(txn));
}
virtual void fetch() {
@@ -172,10 +172,11 @@ Status MmapV1ExtentManager::init(OperationContext* txn) {
}
}
- unique_ptr<DataFile> df(new DataFile(n));
+ unique_ptr<DataFile> df(new DataFile(txn, n));
- Status s = df->openExisting(fullNameString.c_str());
+ Status s = df->openExisting(txn, fullNameString.c_str());
if (!s.isOK()) {
+ df->close(txn);
return s;
}
@@ -240,12 +241,17 @@ DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
}
{
- unique_ptr<DataFile> allocFile(new DataFile(allocFileId));
+ unique_ptr<DataFile> allocFile(new DataFile(txn, allocFileId));
const string allocFileName = _fileName(allocFileId).string();
Timer t;
- allocFile->open(txn, allocFileName.c_str(), minSize, false);
+ try {
+ allocFile->open(txn, allocFileName.c_str(), minSize, false);
+ } catch (...) {
+ allocFile->close(txn);
+ throw;
+ }
if (t.seconds() > 1) {
log() << "MmapV1ExtentManager took " << t.seconds()
<< " seconds to open: " << allocFileName;
@@ -257,10 +263,15 @@ DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
// Preallocate is asynchronous
if (preallocateNextFile) {
- unique_ptr<DataFile> nextFile(new DataFile(allocFileId + 1));
+ unique_ptr<DataFile> nextFile(new DataFile(txn, allocFileId + 1));
const string nextFileName = _fileName(allocFileId + 1).string();
- nextFile->open(txn, nextFileName.c_str(), minSize, false);
+ try {
+ nextFile->open(txn, nextFileName.c_str(), minSize, false);
+ } catch (...) {
+ nextFile->close(txn);
+ throw;
+ }
}
// Returns the last file added
@@ -633,6 +644,12 @@ MmapV1ExtentManager::FilesArray::~FilesArray() {
}
}
+void MmapV1ExtentManager::FilesArray::close(OperationContext* txn) {
+ for (int i = 0; i < size(); i++) {
+ _files[i]->close(txn);
+ }
+}
+
void MmapV1ExtentManager::FilesArray::push_back(DataFile* val) {
stdx::lock_guard<stdx::mutex> lk(_writersMutex);
const int n = _size.load();
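Because DataFile must now be closed with an OperationContext before destruction, both open paths above wrap open() in a try/catch that closes and rethrows, and FilesArray::close() walks every file at shutdown. A sketch of that close-before-destruction contract (illustrative types, not the real DataFile/OperationContext):

    #include <iostream>
    #include <memory>
    #include <stdexcept>

    struct Txn {};

    class File {
    public:
        void open(Txn*, bool fail) {
            if (fail)
                throw std::runtime_error("open failed");
            _open = true;
        }
        void close(Txn*) { _open = false; }
        ~File() {
            // The real DataFile expects close() to have run; here we just report it.
            if (_open)
                std::cerr << "file destroyed while still open!\n";
        }

    private:
        bool _open = false;
    };

    void addFileLike(Txn* txn, bool failOpen) {
        auto f = std::make_unique<File>();
        try {
            f->open(txn, failOpen);
        } catch (...) {
            f->close(txn);  // keep the contract even on the error path
            throw;
        }
        f->close(txn);      // in the real code the owning FilesArray does this in close()
    }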
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
index 573396f76af..fb891ee8227 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
@@ -90,6 +90,13 @@ public:
MmapV1ExtentManager(StringData dbname, StringData path, bool directoryPerDB);
/**
+ * Must be called before destruction.
+ */
+ void close(OperationContext* txn) {
+ _files.close(txn);
+ }
+
+ /**
* opens all current files, not thread safe
*/
Status init(OperationContext* txn);
@@ -210,6 +217,11 @@ private:
~FilesArray();
/**
+ * Must be called before destruction.
+ */
+ void close(OperationContext* txn);
+
+ /**
* Returns file at location 'n' in the array, with 'n' less than number of files added.
* Will always return the same pointer for a given file.
*/
diff --git a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
index 0922e5de7ea..d8e8d61e624 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
@@ -148,8 +148,8 @@ static void* getNextMemoryMappedFileLocation(unsigned long long mmfSize) {
return reinterpret_cast<void*>(static_cast<uintptr_t>(thisMemoryMappedFileLocation));
}
-void MemoryMappedFile::close() {
- LockMongoFilesShared::assertExclusivelyLocked();
+void MemoryMappedFile::close(OperationContext* txn) {
+ LockMongoFilesShared::assertExclusivelyLocked(txn);
// Prevent flush and close from concurrently running
stdx::lock_guard<stdx::mutex> lk(_flushMutex);
@@ -163,20 +163,29 @@ void MemoryMappedFile::close() {
}
views.clear();
+ totalMappedLength.fetchAndSubtract(len);
+ len = 0;
+
if (maphandle)
CloseHandle(maphandle);
maphandle = 0;
- if (fd)
+ if (fd) {
CloseHandle(fd);
- fd = 0;
- destroyed(); // cleans up from the master list of mmaps
+ fd = 0;
+ }
+
+ destroyed(txn); // cleans up from the master list of mmaps
}
-unsigned long long mapped = 0;
+bool MemoryMappedFile::isClosed() {
+ return !len && !fd && !views.size();
+}
-void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length) {
+void* MemoryMappedFile::map(OperationContext* txn,
+ const char* filenameIn,
+ unsigned long long& length) {
verify(fd == 0 && len == 0); // can't open more than once
- setFilename(filenameIn);
+ setFilename(txn, filenameIn);
FileAllocator::get()->allocateAsap(filenameIn, length);
/* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary
* perhaps. */
@@ -222,8 +231,6 @@ void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length)
}
}
- mapped += length;
-
{
DWORD flProtect = readOnly ? PAGE_READONLY : PAGE_READWRITE;
maphandle = CreateFileMappingW(fd,
@@ -237,7 +244,8 @@ void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length)
severe() << "CreateFileMappingW for " << filename << " failed with "
<< errnoWithDescription(dosError) << " (file size is " << length << ")"
<< " in MemoryMappedFile::map" << endl;
- close();
+ LockMongoFilesExclusive lock(txn);
+ close(txn);
fassertFailed(16225);
}
}
@@ -288,7 +296,8 @@ void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length)
<< length << ")"
<< " in MemoryMappedFile::map" << endl;
- close();
+ LockMongoFilesExclusive lock(txn);
+ close(txn);
fassertFailed(16166);
}
@@ -296,8 +305,12 @@ void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length)
}
}
- views.push_back(view);
+ // MemoryMappedFile successfully created, now update state.
len = length;
+ totalMappedLength.fetchAndAdd(len);
+
+ views.push_back(view);
+
return view;
}
@@ -346,8 +359,8 @@ void* MemoryMappedFile::createPrivateMap() {
return privateMapAddress;
}
-void* MemoryMappedFile::remapPrivateView(void* oldPrivateAddr) {
- LockMongoFilesExclusive lockMongoFiles;
+void* MemoryMappedFile::remapPrivateView(OperationContext* txn, void* oldPrivateAddr) {
+ LockMongoFilesExclusive lockMongoFiles(txn);
privateViews.clearWritableBits(oldPrivateAddr, len);
@@ -393,12 +406,12 @@ public:
_filename(filename),
_flushMutex(flushMutex) {}
- void flush() {
+ void flush(OperationContext* txn) {
if (!_view || !_fd)
return;
{
- LockMongoFilesShared mmfilesLock;
+ LockMongoFilesShared mmfilesLock(txn);
std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
@@ -462,7 +475,9 @@ void MemoryMappedFile::flush(bool sync) {
uassert(13056, "Async flushing not supported on windows", sync);
if (!views.empty()) {
WindowsFlushable f(this, viewForFlushing(), fd, _uniqueId, filename(), _flushMutex);
- f.flush();
+ auto txn = cc().getOperationContext();
+ invariant(txn);
+ f.flush(txn);
}
}
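The Windows map() path also drops the free-standing "unsigned long long mapped" counter in favour of an atomic totalMappedLength that is adjusted after a successful map() and again in close(). A standard-C++ equivalent of that bookkeeping (names illustrative):

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uint64_t> totalMappedLength{0};

    void onMap(std::uint64_t len) {
        totalMappedLength.fetch_add(len);   // map(): after the view is created
    }

    void onClose(std::uint64_t len) {
        totalMappedLength.fetch_sub(len);   // close(): before the handles go away
    }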
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
index 0c56ef9e6f1..6f4d3993cbe 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
@@ -204,6 +204,8 @@ DummyExtentManager::~DummyExtentManager() {
}
}
+void DummyExtentManager::close(OperationContext* txn) {}
+
Status DummyExtentManager::init(OperationContext* txn) {
return Status::OK();
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
index 1f29b59334c..eac135dd24a 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
@@ -113,6 +113,8 @@ class DummyExtentManager : public ExtentManager {
public:
virtual ~DummyExtentManager();
+ virtual void close(OperationContext* txn);
+
virtual Status init(OperationContext* txn);
virtual int numFiles() const;
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index d82a89031d6..ea76462eaad 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -253,7 +253,7 @@ public:
getDur().syncDataAndTruncateJournal(_txn);
// need both in case journaling is disabled
- MongoFile::flushAll(true);
+ MongoFile::flushAll(_txn, true);
MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(_path));
} catch (DBException& e) {
@@ -319,7 +319,10 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
unique_ptr<Database> tempDatabase;
// Must call this before MMAPV1DatabaseCatalogEntry's destructor closes the DB files
- ON_BLOCK_EXIT(&dur::DurableInterface::syncDataAndTruncateJournal, &getDur(), txn);
+ ON_BLOCK_EXIT([&dbEntry, &txn] {
+ getDur().syncDataAndTruncateJournal(txn);
+ dbEntry->close(txn);
+ });
{
dbEntry.reset(new MMAPV1DatabaseCatalogEntry(
@@ -431,7 +434,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
getDur().syncDataAndTruncateJournal(txn);
// need both in case journaling is disabled
- MongoFile::flushAll(true);
+ MongoFile::flushAll(txn, true);
txn->checkForInterrupt();
}
diff --git a/src/mongo/db/storage/record_fetcher.h b/src/mongo/db/storage/record_fetcher.h
index 66c626ea4d5..e133e28bdf0 100644
--- a/src/mongo/db/storage/record_fetcher.h
+++ b/src/mongo/db/storage/record_fetcher.h
@@ -30,6 +30,8 @@
namespace mongo {
+class OperationContext;
+
/**
* Used for yielding while data is fetched from disk.
*
@@ -42,7 +44,7 @@ public:
/**
* Performs any setup which is needed prior to yielding locks.
*/
- virtual void setup() = 0;
+ virtual void setup(OperationContext* txn) = 0;
/**
* Called after locks are yielded in order to bring data into memory.
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index f3dc6b1bf0f..02e9c1ff7aa 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -207,7 +207,7 @@ public:
/**
* @return number of files flushed
*/
- virtual int flushAllFiles(bool sync) = 0;
+ virtual int flushAllFiles(OperationContext* txn, bool sync) = 0;
/**
* Transitions the storage engine into backup mode.
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 0c3c006a6fa..682fd842aaf 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -401,7 +401,7 @@ Status WiredTigerKVEngine::_salvageIfNeeded(const char* uri) {
return wtRCToStatus(session->salvage(session, uri, NULL), "Salvage failed:");
}
-int WiredTigerKVEngine::flushAllFiles(bool sync) {
+int WiredTigerKVEngine::flushAllFiles(OperationContext* txn, bool sync) {
LOG(1) << "WiredTigerKVEngine::flushAllFiles";
if (_ephemeral) {
return 0;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index f46b677f218..8f632ef537c 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -108,7 +108,7 @@ public:
StringData ident,
const RecordStore* originalRecordStore) const;
- virtual int flushAllFiles(bool sync);
+ virtual int flushAllFiles(OperationContext* txn, bool sync);
virtual Status beginBackup(OperationContext* txn);
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 6886bed64ca..77a90f0e396 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -199,7 +199,7 @@ Status waitForWriteConcern(OperationContext* txn,
case WriteConcernOptions::SyncMode::FSYNC: {
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
if (!storageEngine->isDurable()) {
- result->fsyncFiles = storageEngine->flushAllFiles(true);
+ result->fsyncFiles = storageEngine->flushAllFiles(txn, true);
} else {
// We only need to commit the journal if we're durable
txn->recoveryUnit()->waitUntilDurable();
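Since flushAllFiles() is virtual on StorageEngine, adding the OperationContext parameter ripples through every implementation (MMAPv1 and WiredTiger above) and every caller, such as this write-concern path. A small sketch of how the context threads through the interface (illustrative names only):

    #include <iostream>

    struct Ctx {};  // stand-in for OperationContext

    class Engine {
    public:
        virtual ~Engine() = default;
        // every implementation now receives the caller's context
        virtual int flushAllFiles(Ctx* ctx, bool sync) = 0;
    };

    class FileBackedEngine : public Engine {
    public:
        int flushAllFiles(Ctx* /*ctx*/, bool sync) override {
            // the mmapv1 version would take LockMongoFilesShared(ctx) here
            std::cout << "flushing all files, sync=" << sync << '\n';
            return 3;  // number of files flushed
        }
    };

    int fsyncForWriteConcernLike(Ctx* ctx, Engine& engine) {
        return engine.flushAllFiles(ctx, /*sync=*/true);
    }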
diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript
index 5db9ef95dec..558b61d316a 100644
--- a/src/mongo/dbtests/SConscript
+++ b/src/mongo/dbtests/SConscript
@@ -124,7 +124,6 @@ dbtest = env.Program(
"$BUILD_DIR/mongo/db/serveronly",
"$BUILD_DIR/mongo/db/logical_clock",
"$BUILD_DIR/mongo/db/storage/paths",
- "$BUILD_DIR/mongo/util/concurrency/rwlock",
"$BUILD_DIR/mongo/util/net/network",
"$BUILD_DIR/mongo/util/progress_meter",
"$BUILD_DIR/mongo/util/version_impl",
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
index 2090e66d4b5..ab6766b72c3 100644
--- a/src/mongo/dbtests/mmaptests.cpp
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/util/scopeguard.h"
#include "mongo/util/timer.h"
namespace MMapTests {
@@ -76,11 +77,16 @@ public:
MMAPV1LockerImpl lockState;
Lock::GlobalWrite lk(&lockState);
+ auto txn = cc().makeOperationContext();
{
- DurableMappedFile f;
+ DurableMappedFile f(txn.get());
+ ON_BLOCK_EXIT([&f, &txn] {
+ LockMongoFilesExclusive lock(txn.get());
+ f.close(txn.get());
+ });
unsigned long long len = 256 * 1024 * 1024;
- verify(f.create(fn, len));
+ verify(f.create(txn.get(), fn, len));
{
char* p = (char*)f.getView();
verify(p);
@@ -93,12 +99,12 @@ public:
char* w = (char*)f.view_write();
strcpy(w + 6, "world");
}
- MongoFileFinder ff;
+ MongoFileFinder ff(txn.get());
ASSERT(ff.findByPath(fn));
ASSERT(ff.findByPath("asdf") == 0);
}
{
- MongoFileFinder ff;
+ MongoFileFinder ff(txn.get());
ASSERT(ff.findByPath(fn) == 0);
}
@@ -112,9 +118,13 @@ public:
Timer t;
for (int i = 0; i < N; i++) {
// Every 4 iterations we pass the sequential hint.
- DurableMappedFile f{i % 4 == 1 ? MongoFile::Options::SEQUENTIAL
- : MongoFile::Options::NONE};
- verify(f.open(fn));
+ DurableMappedFile f{
+ txn.get(), i % 4 == 1 ? MongoFile::Options::SEQUENTIAL : MongoFile::Options::NONE};
+ ON_BLOCK_EXIT([&f, &txn] {
+ LockMongoFilesExclusive lock(txn.get());
+ f.close(txn.get());
+ });
+ verify(f.open(txn.get(), fn));
{
char* p = (char*)f.getView();
verify(p);
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 498e39f71e7..d73267ce7ce 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -45,7 +45,6 @@
#include "mongo/stdx/functional.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/old_thread_pool.h"
-#include "mongo/util/concurrency/rwlock.h"
#include "mongo/util/concurrency/ticketholder.h"
#include "mongo/util/log.h"
#include "mongo/util/timer.h"
@@ -140,237 +139,6 @@ public:
}
};
-class RWLockTest1 {
-public:
- void run() {
- RWLock lk("eliot");
- { rwlock r(lk, true, 1000); }
- }
-};
-
-class RWLockTest2 {
-public:
- static void worker1(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
- x->fetchAndAdd(1); // 1
- RWLockRecursiveNongreedy::Exclusive b(*lk);
- x->fetchAndAdd(1); // 2
- }
- static void worker2(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
- RWLockRecursiveNongreedy::Shared c(*lk);
- x->fetchAndAdd(1);
- }
- void run() {
- /**
- * note: this test will deadlock if the code breaks
- */
- RWLockRecursiveNongreedy lk("eliot2", 120 * 1000);
- cout << "RWLock impl: " << lk.implType() << endl;
- unique_ptr<RWLockRecursiveNongreedy::Shared> a(new RWLockRecursiveNongreedy::Shared(lk));
- AtomicUInt32 x1(0);
- cout << "A : " << &x1 << endl;
- stdx::thread t1(stdx::bind(worker1, &lk, &x1));
- while (!x1.load())
- ;
- verify(x1.load() == 1);
- sleepmillis(500);
- verify(x1.load() == 1);
- AtomicUInt32 x2(0);
- stdx::thread t2(stdx::bind(worker2, &lk, &x2));
- t2.join();
- verify(x2.load() == 1);
- a.reset();
- for (int i = 0; i < 2000; i++) {
- if (x1.load() == 2)
- break;
- sleepmillis(1);
- }
- verify(x1.load() == 2);
- t1.join();
- }
-};
-
-class RWLockTest3 {
-public:
- static void worker2(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
- verify(!lk->__lock_try(0));
- RWLockRecursiveNongreedy::Shared c(*lk);
- x->fetchAndAdd(1);
- }
-
- void run() {
- /**
- * note: this test will deadlock if the code breaks
- */
-
- RWLockRecursiveNongreedy lk("eliot2", 120 * 1000);
-
- unique_ptr<RWLockRecursiveNongreedy::Shared> a(new RWLockRecursiveNongreedy::Shared(lk));
-
- AtomicUInt32 x2(0);
-
- stdx::thread t2(stdx::bind(worker2, &lk, &x2));
- t2.join();
- verify(x2.load() == 1);
-
- a.reset();
- }
-};
-
-class RWLockTest4 {
-public:
-#if defined(__linux__) || defined(__APPLE__)
- static void worker1(pthread_rwlock_t* lk, AtomicUInt32* x) {
- x->fetchAndAdd(1); // 1
- cout << "lock b try" << endl;
- while (1) {
- if (pthread_rwlock_trywrlock(lk) == 0)
- break;
- sleepmillis(10);
- }
- cout << "lock b got" << endl;
- x->fetchAndAdd(1); // 2
- pthread_rwlock_unlock(lk);
- }
-
- static void worker2(pthread_rwlock_t* lk, AtomicUInt32* x) {
- cout << "lock c try" << endl;
- pthread_rwlock_rdlock(lk);
- x->fetchAndAdd(1);
- cout << "lock c got" << endl;
- pthread_rwlock_unlock(lk);
- }
-#endif
- void run() {
-/**
- * note: this test will deadlock if the code breaks
- */
-
-#if defined(__linux__) || defined(__APPLE__)
-
- // create
- pthread_rwlock_t lk;
- verify(pthread_rwlock_init(&lk, 0) == 0);
-
- // read lock
- verify(pthread_rwlock_rdlock(&lk) == 0);
-
- AtomicUInt32 x1(0);
- stdx::thread t1(stdx::bind(worker1, &lk, &x1));
- while (!x1.load())
- ;
- verify(x1.load() == 1);
- sleepmillis(500);
- verify(x1.load() == 1);
-
- AtomicUInt32 x2(0);
-
- stdx::thread t2(stdx::bind(worker2, &lk, &x2));
- t2.join();
- verify(x2.load() == 1);
-
- pthread_rwlock_unlock(&lk);
-
- for (int i = 0; i < 2000; i++) {
- if (x1.load() == 2)
- break;
- sleepmillis(1);
- }
-
- verify(x1.load() == 2);
- t1.join();
-#endif
- }
-};
-
-// we don't use upgrade so that part is not important currently but the other aspects of this test
-// are interesting; it would be nice to do analogous tests for SimpleRWLock and QLock
-class UpgradableTest : public ThreadedTest<7> {
- RWLock m;
-
-public:
- UpgradableTest() : m("utest") {}
-
-private:
- virtual void validate() {}
- virtual void subthread(int x) {
- Client::initThread("utest");
-
- /* r = get a read lock
- R = get a read lock and we expect it to be fast
- u = get upgradable
- U = get upgradable and we expect it to be fast
- w = get a write lock
- */
- // /-- verify upgrade can be done instantly while in a read lock already
- // | /-- verify upgrade acquisition isn't greedy
- // | | /-- verify writes aren't greedy while in upgradable(or are they?)
- // v v v
- const char* what = " RURuRwR";
-
- sleepmillis(100 * x);
-
- int Z = 1;
- LOG(Z) << x << ' ' << what[x] << " request" << endl;
- char ch = what[x];
- switch (ch) {
- case 'w': {
- m.lock();
- LOG(Z) << x << " w got" << endl;
- sleepmillis(100);
- LOG(Z) << x << " w unlock" << endl;
- m.unlock();
- } break;
- case 'u':
- case 'U': {
- Timer t;
- RWLock::Upgradable u(m);
- LOG(Z) << x << ' ' << ch << " got" << endl;
- if (ch == 'U') {
-#if defined(NTDDI_VERSION) && defined(NTDDI_WIN7) && (NTDDI_VERSION >= NTDDI_WIN7)
- // SRW locks are neither fair nor FIFO, as per docs
- if (t.millis() > 2000) {
-#else
- if (t.millis() > 20) {
-#endif
- DEV {
- // a debug buildbot might be slow, try to avoid false positives
- mongo::unittest::log() << "warning lock upgrade was slow " << t.millis()
- << endl;
- }
- else {
- mongo::unittest::log()
- << "assertion failure: lock upgrade was too slow: " << t.millis()
- << endl;
- ASSERT(false);
- }
- }
- }
- sleepsecs(1);
- LOG(Z) << x << ' ' << ch << " unlock" << endl;
- } break;
- case 'r':
- case 'R': {
- Timer t;
- m.lock_shared();
- LOG(Z) << x << ' ' << ch << " got " << endl;
- if (what[x] == 'R') {
- if (t.millis() > 15) {
- // commented out for less chatter, we aren't using upgradeable anyway right
- // now:
- // log() << x << " info: when in upgradable, write locks are still greedy "
- // "on this platform" << endl;
- }
- }
- sleepmillis(200);
- LOG(Z) << x << ' ' << ch << " unlock" << endl;
- m.unlock_shared();
- } break;
- default:
- ASSERT(false);
- }
- }
-};
-
void sleepalittle() {
Timer t;
while (1) {
@@ -451,44 +219,6 @@ private:
}
};
-const int WriteLocksAreGreedy_ThreadCount = 3;
-class WriteLocksAreGreedy : public ThreadedTest<WriteLocksAreGreedy_ThreadCount> {
-public:
- WriteLocksAreGreedy() : m("gtest"), _barrier(WriteLocksAreGreedy_ThreadCount) {}
-
-private:
- RWLock m;
- boost::barrier _barrier;
- virtual void validate() {}
- virtual void subthread(int x) {
- _barrier.wait();
- int Z = 0;
- Client::initThread("utest");
- if (x == 1) {
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1" << endl;
- rwlock_shared lk(m);
- sleepmillis(400);
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
- }
- if (x == 2) {
- sleepmillis(100);
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2" << endl;
- rwlock lk(m, true);
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
- }
- if (x == 3) {
- sleepmillis(200);
- Timer t;
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3" << endl;
- rwlock_shared lk(m);
- LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
- LOG(Z) << t.millis() << endl;
- ASSERT(t.millis() > 50);
- }
- }
-};
-
-
// Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel", but
// only max _nRooms threads should ever get in at once
class TicketHolderWaits : public ThreadedTest<10> {
@@ -559,25 +289,15 @@ public:
All() : Suite("threading") {}
void setupTests() {
- add<WriteLocksAreGreedy>();
-
// Slack is a test to see how long it takes for another thread to pick up
// and begin work after another relinquishes the lock. e.g. a spin lock
// would have very little slack.
add<Slack<SimpleMutex, stdx::lock_guard<SimpleMutex>>>();
- add<Slack<SimpleRWLock, SimpleRWLock::Exclusive>>();
-
- add<UpgradableTest>();
add<IsAtomicWordAtomic<AtomicUInt32>>();
add<IsAtomicWordAtomic<AtomicUInt64>>();
add<ThreadPoolTest>();
- add<RWLockTest1>();
- add<RWLockTest2>();
- add<RWLockTest3>();
- add<RWLockTest4>();
-
add<TicketHolderWaits>();
}
};
diff --git a/src/mongo/util/concurrency/SConscript b/src/mongo/util/concurrency/SConscript
index 32dabcafa63..df0ecc038c2 100644
--- a/src/mongo/util/concurrency/SConscript
+++ b/src/mongo/util/concurrency/SConscript
@@ -67,13 +67,3 @@ env.Library(
'$BUILD_DIR/mongo/util/background_job',
],
)
-
-env.Library(
- target='rwlock',
- source=[
- 'rwlockimpl.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/third_party/shim_boost',
- ],
-)
diff --git a/src/mongo/util/concurrency/rwlock.h b/src/mongo/util/concurrency/rwlock.h
deleted file mode 100644
index b3988efa06b..00000000000
--- a/src/mongo/util/concurrency/rwlock.h
+++ /dev/null
@@ -1,291 +0,0 @@
-// @file rwlock.h generic reader-writer lock (cross platform support)
-
-/*
- * Copyright (C) 2010 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-#pragma once
-
-#include "mongo/util/concurrency/mutex.h"
-#include "mongo/util/concurrency/rwlockimpl.h"
-#include "mongo/util/concurrency/simplerwlock.h"
-#include "mongo/util/concurrency/threadlocal.h"
-#include "mongo/util/debug_util.h"
-#include "mongo/util/time_support.h"
-
-namespace mongo {
-
-class RWLock : public RWLockBase {
- enum {
- NilState,
- UpgradableState,
- Exclusive
- } x; // only bother to set when doing upgradable related things
-public:
- const char* const _name;
- RWLock(const char* name) : _name(name) {
- x = NilState;
- }
- void lock() {
- RWLockBase::lock();
- }
- void unlock() {
- RWLockBase::unlock();
- }
-
- void lock_shared() {
- RWLockBase::lock_shared();
- }
- void unlock_shared() {
- RWLockBase::unlock_shared();
- }
-
-private:
- void lockAsUpgradable() {
- RWLockBase::lockAsUpgradable();
- }
- void unlockFromUpgradable() { // upgradable -> unlocked
- RWLockBase::unlockFromUpgradable();
- }
-
-public:
- void upgrade() { // upgradable -> exclusive lock
- verify(x == UpgradableState);
- RWLockBase::upgrade();
- x = Exclusive;
- }
-
- bool lock_shared_try(int millis) {
- return RWLockBase::lock_shared_try(millis);
- }
-
- bool lock_try(int millis = 0) {
- return RWLockBase::lock_try(millis);
- }
-
- /** acquire upgradable state. You must be unlocked before creating.
- unlocks on destruction, whether in upgradable state or upgraded to exclusive
- in the interim.
- */
- class Upgradable {
- MONGO_DISALLOW_COPYING(Upgradable);
- RWLock& _r;
-
- public:
- Upgradable(RWLock& r) : _r(r) {
- r.lockAsUpgradable();
- verify(_r.x == NilState);
- _r.x = RWLock::UpgradableState;
- }
- ~Upgradable() {
- if (_r.x == RWLock::UpgradableState) {
- _r.x = NilState;
- _r.unlockFromUpgradable();
- } else {
- // TEMP verify( _r.x == Exclusive ); // has been upgraded
- _r.x = NilState;
- _r.unlock();
- }
- }
- };
-};
-
-/** throws on failure to acquire in the specified time period. */
-class rwlock_try_write {
- MONGO_DISALLOW_COPYING(rwlock_try_write);
-
-public:
- struct exception {};
- rwlock_try_write(RWLock& l, int millis = 0) : _l(l) {
- if (!l.lock_try(millis))
- throw exception();
- }
- ~rwlock_try_write() {
- _l.unlock();
- }
-
-private:
- RWLock& _l;
-};
-
-class rwlock_shared {
- MONGO_DISALLOW_COPYING(rwlock_shared);
-
-public:
- rwlock_shared(RWLock& rwlock) : _r(rwlock) {
- _r.lock_shared();
- }
- ~rwlock_shared() {
- _r.unlock_shared();
- }
-
-private:
- RWLock& _r;
-};
-
-/* scoped lock for RWLock */
-class rwlock {
- MONGO_DISALLOW_COPYING(rwlock);
-
-public:
- /**
- * @param write acquire write lock if true sharable if false
- * @param lowPriority if > 0, will try to get the lock non-greedily for that many ms
- */
- rwlock(const RWLock& lock,
- bool write,
- /* bool alreadyHaveLock = false , */ int lowPriorityWaitMS = 0)
- : _lock((RWLock&)lock), _write(write) {
- {
- if (_write) {
- _lock.lock();
- } else {
- _lock.lock_shared();
- }
- }
- }
- ~rwlock() {
- if (_write)
- _lock.unlock();
- else
- _lock.unlock_shared();
- }
-
-private:
- RWLock& _lock;
- const bool _write;
-};
-
-// ----------------------------------------------------------------------------------------
-
-/** recursive on shared locks is ok for this implementation */
-class RWLockRecursive : protected RWLockBase {
-protected:
- ThreadLocalValue<int> _state;
- void
- lock(); // not implemented - Lock() should be used; didn't overload this name to avoid mistakes
- virtual void Lock() {
- RWLockBase::lock();
- }
-
-public:
- virtual ~RWLockRecursive() {}
- const char* const _name;
- RWLockRecursive(const char* name) : _name(name) {}
-
- void assertAtLeastReadLocked() {
- verify(_state.get() != 0);
- }
- void assertExclusivelyLocked() {
- verify(_state.get() < 0);
- }
-
- class Exclusive {
- MONGO_DISALLOW_COPYING(Exclusive);
- RWLockRecursive& _r;
-
- public:
- Exclusive(RWLockRecursive& r) : _r(r) {
- int s = _r._state.get();
- dassert(s <= 0);
- if (s == 0)
- _r.Lock();
- _r._state.set(s - 1);
- }
- ~Exclusive() {
- int s = _r._state.get();
- DEV wassert(s < 0); // wassert: don't throw from destructors
- ++s;
- _r._state.set(s);
- if (s == 0)
- _r.unlock();
- }
- };
-
- class Shared {
- MONGO_DISALLOW_COPYING(Shared);
- RWLockRecursive& _r;
- bool _alreadyLockedExclusiveByUs;
-
- public:
- Shared(RWLockRecursive& r) : _r(r) {
- int s = _r._state.get();
- _alreadyLockedExclusiveByUs = s < 0;
- if (!_alreadyLockedExclusiveByUs) {
- dassert(s >= 0); // -1 would mean exclusive
- if (s == 0)
- _r.lock_shared();
- _r._state.set(s + 1);
- }
- }
- ~Shared() {
- if (_alreadyLockedExclusiveByUs) {
- DEV wassert(_r._state.get() < 0);
- } else {
- int s = _r._state.get() - 1;
- DEV wassert(s >= 0);
- _r._state.set(s);
- if (s == 0)
- _r.unlock_shared();
- }
- }
- };
-};
-
-class RWLockRecursiveNongreedy : public RWLockRecursive {
- virtual void Lock() {
- bool got = false;
- for (int i = 0; i < lowPriorityWaitMS; i++) {
- if (lock_try(0)) {
- got = true;
- break;
- }
- int sleep = 1;
- if (i > (lowPriorityWaitMS / 20))
- sleep = 10;
- sleepmillis(sleep);
- i += (sleep - 1);
- }
- if (!got) {
- RWLockBase::lock();
- }
- }
-
-public:
- const int lowPriorityWaitMS;
- RWLockRecursiveNongreedy(const char* nm, int lpwaitms)
- : RWLockRecursive(nm), lowPriorityWaitMS(lpwaitms) {}
- const char* implType() const {
- return RWLockRecursive::implType();
- }
-
- // just for testing:
- bool __lock_try(int millis) {
- return RWLockRecursive::lock_try(millis);
- }
-};
-}
diff --git a/src/mongo/util/concurrency/rwlockimpl.cpp b/src/mongo/util/concurrency/rwlockimpl.cpp
deleted file mode 100644
index 755724cd3a5..00000000000
--- a/src/mongo/util/concurrency/rwlockimpl.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-// @file rwlockimpl.cpp
-
-/**
-* Copyright (C) 2012 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects
-* for all of the code used other than as permitted herein. If you modify
-* file(s) with this exception, you may extend this exception to your
-* version of the file(s), but you are not obligated to do so. If you do not
-* wish to do so, delete this exception statement from your version. If you
-* delete this exception statement from all source files in the program,
-* then also delete it in the license file.
-*/
-
-
-#if defined(_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#define NOMINMAX
-#include <windows.h>
-#endif
-#include <boost/version.hpp>
-#include <map>
-#include <set>
-
-#include "mongo/config.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
-#include "mongo/util/assert_util.h"
-#include "mongo/util/concurrency/rwlockimpl.h"
-#include "mongo/util/concurrency/simplerwlock.h"
-#include "mongo/util/concurrency/threadlocal.h"
-#include "mongo/util/time_support.h"
-
-using namespace std;
-
-namespace mongo {
-
-#if defined(NTDDI_VERSION) && defined(NTDDI_WIN7) && (NTDDI_VERSION >= NTDDI_WIN7)
-SimpleRWLock::SimpleRWLock(StringData p) : name(p.toString()) {
- InitializeSRWLock(&_lock);
-}
-#if defined(MONGO_CONFIG_DEBUG_BUILD)
-// the code below in a debug build will check that we don't try to recursively lock,
-// which is not supported by this class. also checks that you don't unlock without
-// having locked
-void SimpleRWLock::lock() {
- unsigned me = GetCurrentThreadId();
- int& state = s.getRef();
- dassert(state == 0);
- state--;
- AcquireSRWLockExclusive(&_lock);
- tid = me; // this is for use in the debugger to see who does have the lock
-}
-void SimpleRWLock::unlock() {
- int& state = s.getRef();
- dassert(state == -1);
- state++;
- tid = 0xffffffff;
- ReleaseSRWLockExclusive(&_lock);
-}
-void SimpleRWLock::lock_shared() {
- int& state = s.getRef();
- dassert(state == 0);
- state++;
- AcquireSRWLockShared(&_lock);
- shares.fetchAndAdd(1);
-}
-void SimpleRWLock::unlock_shared() {
- int& state = s.getRef();
- dassert(state == 1);
- state--;
- shares.fetchAndSubtract(1);
- ReleaseSRWLockShared(&_lock);
-}
-#else
-void SimpleRWLock::lock() {
- AcquireSRWLockExclusive(&_lock);
-}
-void SimpleRWLock::unlock() {
- ReleaseSRWLockExclusive(&_lock);
-}
-void SimpleRWLock::lock_shared() {
- AcquireSRWLockShared(&_lock);
-}
-void SimpleRWLock::unlock_shared() {
- ReleaseSRWLockShared(&_lock);
-}
-#endif
-#else
-SimpleRWLock::SimpleRWLock(StringData p) : name(p.toString()) {}
-void SimpleRWLock::lock() {
- m.lock();
-}
-void SimpleRWLock::unlock() {
- m.unlock();
-}
-void SimpleRWLock::lock_shared() {
- m.lock_shared();
-}
-void SimpleRWLock::unlock_shared() {
- m.unlock_shared();
-}
-#endif
-}
diff --git a/src/mongo/util/concurrency/rwlockimpl.h b/src/mongo/util/concurrency/rwlockimpl.h
deleted file mode 100644
index 40103080911..00000000000
--- a/src/mongo/util/concurrency/rwlockimpl.h
+++ /dev/null
@@ -1,185 +0,0 @@
-// @file rwlockimpl.h
-
-/**
-* Copyright (C) 2012 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects
-* for all of the code used other than as permitted herein. If you modify
-* file(s) with this exception, you may extend this exception to your
-* version of the file(s), but you are not obligated to do so. If you do not
-* wish to do so, delete this exception statement from your version. If you
-* delete this exception statement from all source files in the program,
-* then also delete it in the license file.
-*/
-
-#pragma once
-
-#include "mongo/stdx/chrono.h"
-#include "mongo/util/concurrency/mutex.h"
-
-#if defined(NTDDI_VERSION) && defined(NTDDI_WIN7) && (NTDDI_VERSION >= NTDDI_WIN7)
-
-// Windows slimreaderwriter version. Newer windows versions only. Under contention this is slower
-// than boost::shared_mutex, but see https://jira.mongodb.org/browse/SERVER-2327 for why it cannot
-// be used.
-
-namespace mongo {
-unsigned long long curTimeMicros64();
-
-class RWLockBase {
- MONGO_DISALLOW_COPYING(RWLockBase);
- friend class SimpleRWLock;
- SRWLOCK _lock;
-
-protected:
- RWLockBase() {
- InitializeSRWLock(&_lock);
- }
- ~RWLockBase() {
- // no special action needed to destroy a SRWLOCK
- }
- void lock() {
- AcquireSRWLockExclusive(&_lock);
- }
- void unlock() {
- ReleaseSRWLockExclusive(&_lock);
- }
- void lock_shared() {
- AcquireSRWLockShared(&_lock);
- }
- void unlock_shared() {
- ReleaseSRWLockShared(&_lock);
- }
- bool lock_shared_try(int millis) {
- if (TryAcquireSRWLockShared(&_lock))
- return true;
- if (millis == 0)
- return false;
- unsigned long long end = curTimeMicros64() + millis * 1000;
- while (1) {
- Sleep(1);
- if (TryAcquireSRWLockShared(&_lock))
- return true;
- if (curTimeMicros64() >= end)
- break;
- }
- return false;
- }
- bool lock_try(int millis = 0) {
- if (TryAcquireSRWLockExclusive(
- &_lock)) // quick check to optimistically avoid calling curTimeMicros64
- return true;
- if (millis == 0)
- return false;
- unsigned long long end = curTimeMicros64() + millis * 1000;
- do {
- Sleep(1);
- if (TryAcquireSRWLockExclusive(&_lock))
- return true;
- } while (curTimeMicros64() < end);
- return false;
- }
- // no upgradable for this impl
- void lockAsUpgradable() {
- lock();
- }
- void unlockFromUpgradable() {
- unlock();
- }
- void upgrade() {}
-
-public:
- const char* implType() const {
- return "WINSRW";
- }
-};
-}
-
-#else
-
-#if defined(_WIN32)
-#include "shared_mutex_win.hpp"
-namespace mongo {
-namespace detail {
-using rwlock_underlying_shared_mutex = boost::modified_shared_mutex;
-} // namespace detail
-} // namespace mongo
-#else
-#include <boost/chrono.hpp>
-#include <boost/thread/shared_mutex.hpp>
-namespace mongo {
-namespace detail {
-using rwlock_underlying_shared_mutex = boost::shared_mutex; // NOLINT
-} // namespace detail
-} // namespace mongo
-#endif
-
-namespace mongo {
-class RWLockBase {
- MONGO_DISALLOW_COPYING(RWLockBase);
- friend class SimpleRWLock;
- detail::rwlock_underlying_shared_mutex _m;
-
-protected:
- RWLockBase() = default;
-
- void lock() {
- _m.lock();
- }
- void unlock() {
- _m.unlock();
- }
- void lockAsUpgradable() {
- _m.lock_upgrade();
- }
- void unlockFromUpgradable() { // upgradable -> unlocked
- _m.unlock_upgrade();
- }
- void upgrade() { // upgradable -> exclusive lock
- _m.unlock_upgrade_and_lock();
- }
- void lock_shared() {
- _m.lock_shared();
- }
- void unlock_shared() {
- _m.unlock_shared();
- }
- bool lock_shared_try(int millis) {
-#if defined(_WIN32)
- return _m.timed_lock_shared(boost::posix_time::milliseconds(millis));
-#else
- return _m.try_lock_shared_for(boost::chrono::milliseconds(millis)); // NOLINT
-#endif
- }
- bool lock_try(int millis = 0) {
-#if defined(_WIN32)
- return _m.timed_lock(boost::posix_time::milliseconds(millis));
-#else
- return _m.try_lock_for(boost::chrono::milliseconds(millis)); // NOLINT
-#endif
- }
-
-public:
- const char* implType() const {
- return "boost";
- }
-};
-}
-
-#endif
diff --git a/src/mongo/util/concurrency/shared_mutex_win.hpp b/src/mongo/util/concurrency/shared_mutex_win.hpp
deleted file mode 100644
index 86ad659a6d8..00000000000
--- a/src/mongo/util/concurrency/shared_mutex_win.hpp
+++ /dev/null
@@ -1,593 +0,0 @@
-#ifndef BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
-#define BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
-
-// (C) Copyright 2006-8 Anthony Williams
-//
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/* MongoDB :
- Slightly modified boost file to not die above 127 pending writes
- Here is what changed (from boost 1.42.0 shared_mutex.hpp):
- 1,2c1,2
- < #ifndef BOOST_THREAD_WIN32_SHARED_MUTEX_HPP
- < #define BOOST_THREAD_WIN32_SHARED_MUTEX_HPP
- ---
- > #ifndef BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
- > #define BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
- 22c27
- < class shared_mutex:
- ---
- > class modified_shared_mutex:
- 73c78
- < shared_mutex():
- ---
- > modified_shared_mutex():
- 84c89
- < ~shared_mutex()
- ---
- > ~modified_shared_mutex()
- 283a289,290
- > if( new_state.exclusive_waiting == 127 ) // the maximum already!
- > break;
-*/
-
-#include <boost/assert.hpp>
-#include <boost/detail/interlocked.hpp>
-#include <boost/thread/win32/thread_primitives.hpp>
-#include <boost/static_assert.hpp>
-#include <limits.h>
-#include <boost/thread/thread_time.hpp>
-
-#include <boost/config/abi_prefix.hpp>
-
-namespace boost
-{
- class modified_shared_mutex
- {
- MONGO_DISALLOW_COPYING(modified_shared_mutex);
- private:
- struct state_data
- {
- unsigned shared_count:11,
- shared_waiting:11,
- exclusive:1,
- upgrade:1,
- exclusive_waiting:7,
- exclusive_waiting_blocked:1;
-
- friend bool operator==(state_data const& lhs,state_data const& rhs)
- {
- return *reinterpret_cast<unsigned const*>(&lhs)==*reinterpret_cast<unsigned const*>(&rhs);
- }
- };
-
-
- template<typename T>
- T interlocked_compare_exchange(T* target,T new_value,T comparand)
- {
- BOOST_STATIC_ASSERT(sizeof(T)==sizeof(long));
- long const res=BOOST_INTERLOCKED_COMPARE_EXCHANGE(reinterpret_cast<long*>(target),
- *reinterpret_cast<long*>(&new_value),
- *reinterpret_cast<long*>(&comparand));
- return *reinterpret_cast<T const*>(&res);
- }
-
- state_data state;
- detail::win32::handle semaphores[2];
- detail::win32::handle &unlock_sem;
- detail::win32::handle &exclusive_sem;
- detail::win32::handle upgrade_sem;
-
- void release_waiters(state_data old_state)
- {
- if(old_state.exclusive_waiting)
- {
- BOOST_VERIFY(detail::win32::ReleaseSemaphore(exclusive_sem,1,0)!=0);
- }
-
- if(old_state.shared_waiting || old_state.exclusive_waiting)
- {
- BOOST_VERIFY(detail::win32::ReleaseSemaphore(unlock_sem,old_state.shared_waiting + (old_state.exclusive_waiting?1:0),0)!=0);
- }
- }
-
-
- public:
- modified_shared_mutex():
- unlock_sem(semaphores[0]),
- exclusive_sem(semaphores[1])
- {
- unlock_sem=detail::win32::create_anonymous_semaphore(0,LONG_MAX);
- exclusive_sem=detail::win32::create_anonymous_semaphore(0,LONG_MAX);
- upgrade_sem=detail::win32::create_anonymous_semaphore(0,LONG_MAX);
- state_data state_={0};
- state=state_;
- }
-
- ~modified_shared_mutex()
- {
- detail::win32::CloseHandle(upgrade_sem);
- detail::win32::CloseHandle(unlock_sem);
- detail::win32::CloseHandle(exclusive_sem);
- }
-
- bool try_lock_shared()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- if(!new_state.exclusive && !new_state.exclusive_waiting_blocked)
- {
- ++new_state.shared_count;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
- return !(old_state.exclusive| old_state.exclusive_waiting_blocked);
- }
-
- void lock_shared()
- {
- BOOST_VERIFY(timed_lock_shared(::boost::detail::get_system_time_sentinel()));
- }
-
- template<typename TimeDuration>
- bool timed_lock_shared(TimeDuration const & relative_time)
- {
- return timed_lock_shared(get_system_time()+relative_time);
- }
-
- bool timed_lock_shared(boost::system_time const& wait_until)
- {
- for(;;)
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- if(new_state.exclusive || new_state.exclusive_waiting_blocked)
- {
- ++new_state.shared_waiting;
- }
- else
- {
- ++new_state.shared_count;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
-
- if(!(old_state.exclusive| old_state.exclusive_waiting_blocked))
- {
- return true;
- }
-
- unsigned long const res=WaitForSingleObject(unlock_sem,::boost::detail::get_milliseconds_until(wait_until));
- if(res==detail::win32::timeout)
- {
- for(;;)
- {
- state_data new_state=old_state;
- if(new_state.exclusive || new_state.exclusive_waiting_blocked)
- {
- if(new_state.shared_waiting)
- {
- --new_state.shared_waiting;
- }
- }
- else
- {
- ++new_state.shared_count;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
-
- if(!(old_state.exclusive| old_state.exclusive_waiting_blocked))
- {
- return true;
- }
- return false;
- }
-
- BOOST_ASSERT(res==0);
- }
- }
-
- void unlock_shared()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- bool const last_reader=!--new_state.shared_count;
-
- if(last_reader)
- {
- if(new_state.upgrade)
- {
- new_state.upgrade=false;
- new_state.exclusive=true;
- }
- else
- {
- if(new_state.exclusive_waiting)
- {
- --new_state.exclusive_waiting;
- new_state.exclusive_waiting_blocked=false;
- }
- new_state.shared_waiting=0;
- }
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- if(last_reader)
- {
- if(old_state.upgrade)
- {
- BOOST_VERIFY(detail::win32::ReleaseSemaphore(upgrade_sem,1,0)!=0);
- }
- else
- {
- release_waiters(old_state);
- }
- }
- break;
- }
- old_state=current_state;
- }
- }
-
- void lock()
- {
- BOOST_VERIFY(timed_lock(::boost::detail::get_system_time_sentinel()));
- }
-
- template<typename TimeDuration>
- bool timed_lock(TimeDuration const & relative_time)
- {
- return timed_lock(get_system_time()+relative_time);
- }
-
- bool try_lock()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- if(new_state.shared_count || new_state.exclusive)
- {
- return false;
- }
- else
- {
- new_state.exclusive=true;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
- return true;
- }
-
-
- bool timed_lock(boost::system_time const& wait_until)
- {
- for(;;)
- {
- state_data old_state=state;
-
- for(;;)
- {
- state_data new_state=old_state;
- if(new_state.shared_count || new_state.exclusive)
- {
- if( new_state.exclusive_waiting == 127 ) // the maximum already!
- break;
- ++new_state.exclusive_waiting;
- new_state.exclusive_waiting_blocked=true;
- }
- else
- {
- new_state.exclusive=true;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
-
- if(!old_state.shared_count && !old_state.exclusive)
- {
- return true;
- }
- unsigned long const wait_res=WaitForMultipleObjects(2,semaphores,true,::boost::detail::get_milliseconds_until(wait_until));
- if(wait_res==detail::win32::timeout)
- {
- for(;;)
- {
- state_data new_state=old_state;
- if(new_state.shared_count || new_state.exclusive)
- {
- if(new_state.exclusive_waiting)
- {
- if(!--new_state.exclusive_waiting)
- {
- new_state.exclusive_waiting_blocked=false;
- }
- }
- }
- else
- {
- new_state.exclusive=true;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
- if(!old_state.shared_count && !old_state.exclusive)
- {
- return true;
- }
- return false;
- }
- BOOST_ASSERT(wait_res<2);
- }
- }
-
- void unlock()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- new_state.exclusive=false;
- if(new_state.exclusive_waiting)
- {
- --new_state.exclusive_waiting;
- new_state.exclusive_waiting_blocked=false;
- }
- new_state.shared_waiting=0;
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
- release_waiters(old_state);
- }
-
- void lock_upgrade()
- {
- for(;;)
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- if(new_state.exclusive || new_state.exclusive_waiting_blocked || new_state.upgrade)
- {
- ++new_state.shared_waiting;
- }
- else
- {
- ++new_state.shared_count;
- new_state.upgrade=true;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
-
- if(!(old_state.exclusive|| old_state.exclusive_waiting_blocked|| old_state.upgrade))
- {
- return;
- }
-
- BOOST_VERIFY(!WaitForSingleObject(unlock_sem,detail::win32::infinite));
- }
- }
-
- bool try_lock_upgrade()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- if(new_state.exclusive || new_state.exclusive_waiting_blocked || new_state.upgrade)
- {
- return false;
- }
- else
- {
- ++new_state.shared_count;
- new_state.upgrade=true;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
- return true;
- }
-
- void unlock_upgrade()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- new_state.upgrade=false;
- bool const last_reader=!--new_state.shared_count;
-
- if(last_reader)
- {
- if(new_state.exclusive_waiting)
- {
- --new_state.exclusive_waiting;
- new_state.exclusive_waiting_blocked=false;
- }
- new_state.shared_waiting=0;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- if(last_reader)
- {
- release_waiters(old_state);
- }
- break;
- }
- old_state=current_state;
- }
- }
-
- void unlock_upgrade_and_lock()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- bool const last_reader=!--new_state.shared_count;
-
- if(last_reader)
- {
- new_state.upgrade=false;
- new_state.exclusive=true;
- }
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- if(!last_reader)
- {
- BOOST_VERIFY(!WaitForSingleObject(upgrade_sem,detail::win32::infinite));
- }
- break;
- }
- old_state=current_state;
- }
- }
-
- void unlock_and_lock_upgrade()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- new_state.exclusive=false;
- new_state.upgrade=true;
- ++new_state.shared_count;
- if(new_state.exclusive_waiting)
- {
- --new_state.exclusive_waiting;
- new_state.exclusive_waiting_blocked=false;
- }
- new_state.shared_waiting=0;
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
- release_waiters(old_state);
- }
-
- void unlock_and_lock_shared()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- new_state.exclusive=false;
- ++new_state.shared_count;
- if(new_state.exclusive_waiting)
- {
- --new_state.exclusive_waiting;
- new_state.exclusive_waiting_blocked=false;
- }
- new_state.shared_waiting=0;
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
- release_waiters(old_state);
- }
-
- void unlock_upgrade_and_lock_shared()
- {
- state_data old_state=state;
- for(;;)
- {
- state_data new_state=old_state;
- new_state.upgrade=false;
- if(new_state.exclusive_waiting)
- {
- --new_state.exclusive_waiting;
- new_state.exclusive_waiting_blocked=false;
- }
- new_state.shared_waiting=0;
-
- state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
- if(current_state==old_state)
- {
- break;
- }
- old_state=current_state;
- }
- release_waiters(old_state);
- }
-
- };
-}
-
-#include <boost/config/abi_suffix.hpp>
-
-#endif
diff --git a/src/mongo/util/concurrency/simplerwlock.h b/src/mongo/util/concurrency/simplerwlock.h
deleted file mode 100644
index 4673f799f8c..00000000000
--- a/src/mongo/util/concurrency/simplerwlock.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
-* Copyright (C) 2012 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects
-* for all of the code used other than as permitted herein. If you modify
-* file(s) with this exception, you may extend this exception to your
-* version of the file(s), but you are not obligated to do so. If you do not
-* wish to do so, delete this exception statement from your version. If you
-* delete this exception statement from all source files in the program,
-* then also delete it in the license file.
-*/
-
-#pragma once
-
-#include "mongo/base/string_data.h"
-#include "mongo/config.h"
-#include "mongo/platform/atomic_word.h"
-#include "mongo/util/concurrency/threadlocal.h"
-
-namespace mongo {
-
-/** separated out as later the implementation of this may be different than RWLock,
- depending on OS, as there is no upgrade etc. facility herein.
-*/
-class SimpleRWLock {
- MONGO_DISALLOW_COPYING(SimpleRWLock);
-#if defined(NTDDI_VERSION) && defined(NTDDI_WIN7) && (NTDDI_VERSION >= NTDDI_WIN7)
- SRWLOCK _lock;
-#else
- RWLockBase m;
-#endif
-#if defined(_WIN32) && defined(MONGO_CONFIG_DEBUG_BUILD)
- AtomicUInt32 shares;
- ThreadLocalValue<int> s;
- unsigned tid;
-#endif
-public:
- const std::string name;
- SimpleRWLock(StringData name = "");
- void lock();
- void unlock();
- void lock_shared();
- void unlock_shared();
- class Shared {
- MONGO_DISALLOW_COPYING(Shared);
- SimpleRWLock& _r;
-
- public:
- Shared(SimpleRWLock& rwlock) : _r(rwlock) {
- _r.lock_shared();
- }
- ~Shared() {
- _r.unlock_shared();
- }
- };
- class Exclusive {
- MONGO_DISALLOW_COPYING(Exclusive);
- SimpleRWLock& _r;
-
- public:
- Exclusive(SimpleRWLock& rwlock) : _r(rwlock) {
- _r.lock();
- }
- ~Exclusive() {
- _r.unlock();
- }
- };
-};
-}
diff --git a/src/mongo/util/processinfo.h b/src/mongo/util/processinfo.h
index b2e6fcf2852..f44ef37799a 100644
--- a/src/mongo/util/processinfo.h
+++ b/src/mongo/util/processinfo.h
@@ -245,6 +245,4 @@ public:
};
bool writePidFile(const std::string& path);
-
-void printMemInfo(const char* whereContextStr = 0);
}