diff options
author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-01-08 13:29:06 -0500 |
---|---|---|
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-01-12 10:36:22 -0500 |
commit | 9566e29b1dd85ada54d5a5924d8f3f38c517a74f (patch) | |
tree | 2074379a5fd5fac6ed78e59f73c23dd31db81519 | |
parent | c898c5ba48b780d6b5da8be3e1a5d3b9556a9694 (diff) | |
download | mongo-9566e29b1dd85ada54d5a5924d8f3f38c517a74f.tar.gz |
SERVER-16065 Move stuff out of CommitJob and into dur.cpp
No need to duplicate this functionality.
-rw-r--r-- | src/mongo/db/storage/mmap_v1/dur.cpp | 23 |
-rw-r--r-- | src/mongo/db/storage/mmap_v1/dur_commitjob.cpp | 60 |
-rw-r--r-- | src/mongo/db/storage/mmap_v1/dur_commitjob.h | 18 |
-rw-r--r-- | src/mongo/db/storage/mmap_v1/dur_recover.cpp | 10 |
-rw-r--r-- | src/mongo/db/storage/mmap_v1/dur_stats.h | 2 |
5 files changed, 49 insertions, 64 deletions
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp index e7dccfb019b..a7840cc34a4 100644 --- a/src/mongo/db/storage/mmap_v1/dur.cpp +++ b/src/mongo/db/storage/mmap_v1/dur.cpp @@ -102,6 +102,10 @@ namespace { boost::mutex flushMutex; boost::condition_variable flushRequested; + // for getlasterror fsync:true acknowledgements + NotifyAll::When commitNumber(0); + NotifyAll commitNotify; + // When set, the flush thread will exit AtomicUInt32 shutdownRequested(0); @@ -281,19 +285,19 @@ namespace { // bool DurableImpl::commitNow(OperationContext* txn) { - NotifyAll::When when = commitJob._notify.now(); + NotifyAll::When when = commitNotify.now(); AutoYieldFlushLockForMMAPV1Commit flushLockYield(txn->lockState()); // There is always just one waiting anyways flushRequested.notify_one(); - commitJob._notify.waitFor(when); + commitNotify.waitFor(when); return true; } bool DurableImpl::awaitCommit() { - commitJob._notify.awaitBeyondNow(); + commitNotify.awaitBeyondNow(); return true; } @@ -329,11 +333,11 @@ namespace { } void DurableImpl::commitAndStopDurThread() { - NotifyAll::When when = commitJob._notify.now(); + NotifyAll::When when = commitNotify.now(); // There is always just one waiting anyways flushRequested.notify_one(); - commitJob._notify.waitFor(when); + commitNotify.waitFor(when); shutdownRequested.store(1); } @@ -585,7 +589,7 @@ namespace { break; } - if (commitJob._notify.nWaiting()) { + if (commitNotify.nWaiting()) { // One or more getLastError j:true is pending break; } @@ -602,13 +606,13 @@ namespace { OperationContextImpl txn; AutoAcquireFlushLockForMMAPV1Commit autoFlushLock(txn.lockState()); - commitJob.commitingBegin(); + commitNumber = commitNotify.now(); if (!commitJob.hasWritten()) { // getlasterror request could have came after the data was already committed. // No need to call committingReset though, because we have not done any // writes (hasWritten == false). 
- commitJob.committingNotifyCommitted(); + commitNotify.notifyAll(commitNumber); } else { JSectHeader h; @@ -653,7 +657,7 @@ namespace { // journalBuilder and hence will not be persisted, but in this case // commitJob.commitingBegin() bumps the commit number, so those writers will // wait for the next run of this loop. - commitJob.committingNotifyCommitted(); + commitNotify.notifyAll(commitNumber); // Apply the journal entries on top of the shared view so that when flush // is requested it would write the latest. @@ -675,6 +679,7 @@ namespace { journalBuilder.reset(); } + stats.curr->_commits++; LOG(4) << "groupCommit end"; } catch (DBException& e) { diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp index 49cec5c3862..ca33fdf593e 100644 --- a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp +++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp @@ -30,10 +30,7 @@ #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage -#define MONGO_PCH_WHITELISTED #include "mongo/platform/basic.h" -#include "mongo/pch.h" -#undef MONGO_PCH_WHITELISTED #include "mongo/db/storage/mmap_v1/dur_commitjob.h" @@ -112,11 +109,6 @@ namespace mongo { _intentsAndDurOps._durOps.push_back(p); } - void CommitJob::commitingBegin() { - _commitNumber = _notify.now(); - stats.curr->_commits++; - } - void CommitJob::committingReset() { _hasWritten = false; _intentsAndDurOps.clear(); @@ -125,10 +117,10 @@ namespace mongo { CommitJob::CommitJob() : groupCommitMutex("groupCommit"), - _hasWritten(false) - { - _commitNumber = 0; - _bytes = 0; + _hasWritten(false), + _lastNotedPos(0), + _bytes(0) { + } void CommitJob::note(void* p, int len) { @@ -138,14 +130,14 @@ namespace mongo { // be read locked here. 
but must be at least read locked to avoid race with // remapprivateview - if( !_intentsAndDurOps._alreadyNoted.checkAndSet(p, len) ) { + if (!_intentsAndDurOps._alreadyNoted.checkAndSet(p, len)) { /** tips for debugging: if you have an incorrect diff between data files in different folders (see jstests/dur/quick.js for example), turn this on and see what is logged. if you have a copy of its output from before the regression, a simple diff of these lines would tell you a lot likely. - */ + */ #if 0 && defined(_DEBUG) { static int n; @@ -165,29 +157,27 @@ namespace mongo { } #endif - // remember intent. we will journal it in a bit + // Remember intent. We will journal it in a bit. _intentsAndDurOps.insertWriteIntent(p, len); - { - // a bit over conservative in counting pagebytes used - static size_t lastPos; // note this doesn't reset with each commit, but that is ok we aren't being that precise - size_t x = ((size_t) p) & ~0xfff; // round off to page address (4KB) - if( x != lastPos ) { - lastPos = x; - unsigned b = (len+4095) & ~0xfff; - _bytes += b; - - if (_bytes > UncommittedBytesLimit * 3) { - static time_t lastComplain; - static unsigned nComplains; - // throttle logging - if( ++nComplains < 100 || time(0) - lastComplain >= 60 ) { - lastComplain = time(0); - warning() << "DR102 too much data written uncommitted " << _bytes/1000000.0 << "MB" << endl; - if( nComplains < 10 || nComplains % 10 == 0 ) { - // wassert makes getLastError show an error, so we just print stack trace - printStackTrace(); - } + // Round off to page address (4KB) + const size_t x = ((size_t)p) & ~0xfff; + + if (x != _lastNotedPos) { + _lastNotedPos = x; + unsigned b = (len + 4095) & ~0xfff; + _bytes += b; + + if (_bytes > UncommittedBytesLimit * 3) { + static time_t lastComplain; + static unsigned nComplains; + // throttle logging + if (++nComplains < 100 || time(0) - lastComplain >= 60) { + lastComplain = time(0); + warning() << "DR102 too much data written uncommitted " << _bytes / 
1000000.0 << "MB" << endl; + if (nComplains < 10 || nComplains % 10 == 0) { + // wassert makes getLastError show an error, so we just print stack trace + printStackTrace(); } } } diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.h b/src/mongo/db/storage/mmap_v1/dur_commitjob.h index d683cd12b32..70302375557 100644 --- a/src/mongo/db/storage/mmap_v1/dur_commitjob.h +++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.h @@ -138,7 +138,6 @@ namespace mongo { ~CommitJob(){ verify(!"shouldn't destroy CommitJob!"); } public: - SimpleMutex groupCommitMutex; CommitJob(); /** note an operation other than a "basic write". threadsafe (locks in the impl) */ @@ -157,12 +156,6 @@ namespace mongo { bool hasWritten() const { return _hasWritten; } public: - /** these called by the groupCommit code as it goes along */ - void commitingBegin(); - /** the commit code calls this when data reaches the journal (on disk) */ - void committingNotifyCommitted() { - _notify.notifyAll(_commitNumber); - } /** we use the commitjob object over and over, calling reset() rather than reconstructing */ void committingReset(); @@ -179,14 +172,17 @@ namespace mongo { return _intentsAndDurOps._intents; } - bool _hasWritten; + SimpleMutex groupCommitMutex; private: - NotifyAll::When _commitNumber; + // Contains the write intents + bool _hasWritten; IntentsAndDurOps _intentsAndDurOps; + + // Used to count the private map used bytes. Note that _lastNotedPos doesn't reset + // with each commit, but that is ok we aren't being that precise. 
+ size_t _lastNotedPos; size_t _bytes; - public: - NotifyAll _notify; // for getlasterror fsync:true acknowledgements }; extern CommitJob& commitJob; diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp index 2b1964f24d5..f568ee34e78 100644 --- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp +++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp @@ -70,8 +70,6 @@ using namespace mongoutils; namespace mongo { - using boost::shared_ptr; - /** * Thrown when a journal section is corrupt. This is considered OK as long as it occurs while * processing the last file. Processing stops at the first corrupt section. @@ -94,7 +92,7 @@ namespace mongo { const JEntry *e; // local db sentinel is already parsed out here into dbName // if not one of the two simple JEntry's above, this is the operation: - shared_ptr<DurOp> op; + boost::shared_ptr<DurOp> op; }; void removeJournalFiles(); @@ -609,12 +607,6 @@ namespace mongo { ScopedTransaction transaction(&txn, MODE_X); Lock::GlobalWrite lk(txn.lockState()); - // can't lock groupCommitMutex here as - // DurableMappedFile::close()->closingFileNotication()->groupCommit() will lock it - // and that would be recursive. - // - // SimpleMutex::scoped_lock lk2(commitJob.groupCommitMutex); - _recover(); // throws on interruption } diff --git a/src/mongo/db/storage/mmap_v1/dur_stats.h b/src/mongo/db/storage/mmap_v1/dur_stats.h index a3b7fccee1f..5f6d21c939d 100644 --- a/src/mongo/db/storage/mmap_v1/dur_stats.h +++ b/src/mongo/db/storage/mmap_v1/dur_stats.h @@ -41,6 +41,7 @@ namespace mongo { void rotate(); BSONObj asObj(); unsigned _intervalMicros; + struct S { BSONObj _asObj(); std::string _asCSV(); @@ -59,6 +60,7 @@ namespace mongo { int _dtMillis; }; + S *curr; private: S _a,_b; |