| | | |
|---|---|---|
| author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2014-05-22 14:28:56 -0400 |
| committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2014-05-28 16:13:47 -0400 |
| commit | ee3fb776c7f36d59b593db7e4165b0611a7a503f (patch) | |
| tree | 46a523ea698d1a9a80013816d8e568e2fdd2f2dc /src | |
| parent | b27a764852873f5ecdc2dde6fffc317383c032c2 (diff) | |
| download | mongo-ee3fb776c7f36d59b593db7e4165b0611a7a503f.tar.gz | |
SERVER-13961 Remove thread-local write intents
This change makes all write intents go straight to the global write
intents list. Cleaning this up is a necessary step toward moving to a
per-operation intents list, but it may cause more contention on the
global intents list.
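To make the trade-off concrete, here is a minimal sketch of the two strategies, using assumed stand-in types (`WriteIntent`, `GlobalIntentList`, `ThreadLocalSpool`, and `declareWriteIntent` below are illustrative, not MongoDB's actual `dur::WriteIntent`, `CommitJob`, or `groupCommitMutex`). Before this change, each thread spooled intents into a thread-local buffer and only took the global mutex when the buffer filled or the write lock was released; after it, every declared intent takes the global mutex immediately.

```cpp
// Minimal sketch of the old (thread-local spooling) and new (direct-to-global)
// intent declaration paths. All names here are assumed stand-ins, not
// MongoDB's real dur::WriteIntent / CommitJob / groupCommitMutex types.
#include <cstddef>
#include <mutex>
#include <vector>

struct WriteIntent {
    void*       p;
    std::size_t len;
};

// Global intents list guarded by a single mutex (stand-in for CommitJob and
// its groupCommitMutex).
struct GlobalIntentList {
    std::mutex mtx;
    std::vector<WriteIntent> intents;

    void note(void* p, std::size_t len) {
        std::lock_guard<std::mutex> lk(mtx);  // taken on every intent after the change
        intents.push_back({p, len});
    }
};

GlobalIntentList globalIntents;

// Old approach (removed by this commit): buffer intents per thread and flush
// them in batches, so the global mutex is taken far less often. In the real
// code this buffer lived in thread-local storage (TSP_DECLARE/TSP_DEFINE).
class ThreadLocalSpool {
    static const std::size_t kBatch = 21;  // mirrors ThreadLocalIntents::N
    std::vector<WriteIntent> buffered;

public:
    void push(void* p, std::size_t len) {
        buffered.push_back({p, len});
        if (buffered.size() == kBatch)
            unspool();
    }

    // In the old code this also ran when the write lock was released.
    void unspool() {
        if (buffered.empty())
            return;
        std::lock_guard<std::mutex> lk(globalIntents.mtx);  // one lock per batch
        for (const WriteIntent& wi : buffered)
            globalIntents.intents.push_back(wi);
        buffered.clear();
    }
};

// New approach (what this commit switches to): every declared intent goes
// straight to the global list.
void declareWriteIntent(void* p, std::size_t len) {
    globalIntents.note(p, len);
}

int main() {
    char buf[64];

    // Old path: two pushes, one locked flush.
    ThreadLocalSpool spool;
    spool.push(buf, 16);
    spool.push(buf + 16, 16);
    spool.unspool();

    // New path: each call locks the global list.
    declareWriteIntent(buf + 32, 16);

    return static_cast<int>(globalIntents.intents.size());  // 3
}
```

In exchange for simpler bookkeeping, the new path pays one mutex acquisition per intent instead of one per batch of up to 21, which is the extra contention the commit message warns about.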
Diffstat (limited to 'src')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | src/mongo/db/d_concurrency.cpp | 23 |
| -rw-r--r-- | src/mongo/db/storage/mmap_v1/dur.cpp | 16 |
| -rw-r--r-- | src/mongo/db/storage/mmap_v1/dur_commitjob.cpp | 117 |
| -rw-r--r-- | src/mongo/db/storage/mmap_v1/dur_commitjob.h | 24 |
| -rw-r--r-- | src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp | 3 |
5 files changed, 7 insertions, 176 deletions
diff --git a/src/mongo/db/d_concurrency.cpp b/src/mongo/db/d_concurrency.cpp
index edd21c0d88a..7f84d776bb1 100644
--- a/src/mongo/db/d_concurrency.cpp
+++ b/src/mongo/db/d_concurrency.cpp
@@ -70,11 +70,6 @@ namespace mongo {
         virtual ~DBTryLockTimeoutException() throw() { }
     };
 
-    namespace dur {
-        void assertNothingSpooled();
-        void releasingWriteLock();
-    }
-
     /* dbname->lock
        Currently these are never deleted - will linger if db was closed. (that should be fine.)
        We don't put the lock inside the Database object as those can come and go with open and
@@ -99,10 +94,6 @@ namespace mongo {
         return &nestableLocks[db]->getStats();
     }
 
-    static void locked_W();
-    static void unlocking_w();
-    static void unlocking_W();
-
     class WrapperForQLock {
         QLock q;
     public:
@@ -139,7 +130,6 @@ namespace mongo {
             {
                 q.lock_W();
             }
-            locked_W();
         }
 
         // how to count try's that fail is an interesting question. we should get rid of try().
@@ -156,7 +146,6 @@ namespace mongo {
             bool got = q.lock_W_try(millis);
             if( got ) {
                 lockState().lockedStart( 'W' );
-                locked_W();
             }
             return got;
         }
@@ -168,7 +157,6 @@ namespace mongo {
         }
 
         void unlock_w() {
-            unlocking_w();
             wassert( threadState() == 'w' );
             lockState().unlocked();
             q.unlock_w();
@@ -178,7 +166,6 @@ namespace mongo {
 
         void unlock_W() {
             wassert( threadState() == 'W' );
-            unlocking_W();
             lockState().unlocked();
             q.unlock_W();
         }
@@ -772,16 +759,6 @@ namespace mongo {
     readlocktry::~readlocktry() {
     }
 
-    void locked_W() {
-    }
-    void unlocking_w() {
-        // we can't commit early in this case; so a bit more to do here.
-        dur::releasingWriteLock();
-    }
-    void unlocking_W() {
-        dur::releasingWriteLock();
-    }
-
     class GlobalLockServerStatusSection : public ServerStatusSection {
     public:
         GlobalLockServerStatusSection() : ServerStatusSection( "globalLock" ){
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index 0445db976eb..0bfab752496 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -95,9 +95,6 @@ namespace mongo {
 
     namespace dur {
 
-        void assertNothingSpooled();
-        void unspoolWriteIntents();
-
         void PREPLOGBUFFER(JSectHeader& outParm, AlignedBuilder&);
         void WRITETOJOURNAL(JSectHeader h, AlignedBuilder& uncompressed);
         void WRITETODATAFILES(const JSectHeader& h, AlignedBuilder& uncompressed);
@@ -269,8 +266,6 @@ namespace mongo {
         }
 
         bool DurableImpl::isCommitNeeded() const {
-            DEV commitJob._nSinceCommitIfNeededCall = 0;
-            unspoolWriteIntents();
             return commitJob.bytes() > UncommittedBytesLimit;
         }
 
@@ -348,8 +343,6 @@ namespace mongo {
             // spot in an operation to be terminated.
             cc().checkpointHappened();
 
-            unspoolWriteIntents();
-            DEV commitJob._nSinceCommitIfNeededCall = 0;
             if( likely( commitJob.bytes() < UncommittedBytesLimit && !force ) ) {
                 return false;
             }
@@ -574,7 +567,6 @@ namespace mongo {
         static AlignedBuilder __theBuilder(4 * 1024 * 1024);
 
         static bool _groupCommitWithLimitedLocks() {
-            unspoolWriteIntents(); // in case we were doing some writing ourself (likely impossible with limitedlocks version)
 
             AlignedBuilder &ab = __theBuilder;
 
             verify( ! Lock::isLocked() );
@@ -663,9 +655,6 @@ namespace mongo {
 
             // We are 'R' or 'W'
             assertLockedForCommitting();
-
-            unspoolWriteIntents(); // in case we were doing some writing ourself
-
             {
                 AlignedBuilder &ab = __theBuilder;
@@ -860,11 +849,6 @@ namespace mongo {
         }
 
         void recover();
-
-        void releasingWriteLock() {
-            unspoolWriteIntents();
-        }
-
         void preallocateFiles();
 
         /** at startup, recover, and then start the journal threads */
diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
index 63f7906c27f..2e44b2f602e 100644
--- a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
@@ -39,109 +39,14 @@ namespace mongo {
 
-#if defined(_DEBUG) && (defined(_WIN64) || !defined(_WIN32))
-#define CHECK_SPOOLING 1
-#endif
-
-    namespace dur {
-
-        ThreadLocalIntents::ThreadLocalIntents() {
-            intents.reserve(N);
-        }
-
-        ThreadLocalIntents::~ThreadLocalIntents() {
-            fassert( 16731, intents.size() == 0 );
-        }
-
-        void ThreadLocalIntents::push(const WriteIntent& x) {
-            intents.push_back( x );
-#if( CHECK_SPOOLING )
-            nSpooled++;
-#endif
-            if( intents.size() == N ) {
-                if ( !condense() ) {
-                    unspool();
-                }
-            }
-        }
-
-        // we are in groupCommitMutex when this is called
-        void ThreadLocalIntents::_unspool() {
-            dassert( intents.size() );
-
-            commitJob._hasWritten = true;
-
-            for( unsigned j = 0; j < intents.size(); j++ ) {
-                commitJob.note(intents[j].start(), intents[j].length());
-            }
-
-#if( CHECK_SPOOLING )
-            nSpooled.signedAdd( -1 * static_cast<int>(intents.size()) );
-#endif
-
-            intents.clear();
-        }
-
-        bool ThreadLocalIntents::condense() {
-            std::sort( intents.begin(), intents.end() );
-
-            bool didAnything = false;
-
-            for ( unsigned x = 0; x < intents.size() - 1 ; x++ ) {
-                if ( intents[x].overlaps( intents[x+1] ) ) {
-                    intents[x].absorb( intents[x+1] );
-                    intents.erase( intents.begin() + x + 1 );
-                    x--;
-                    didAnything = true;
-#if( CHECK_SPOOLING )
-                    nSpooled.signedAdd(-1);
-#endif
-                }
-            }
-
-            return didAnything;
-        }
-
-        void ThreadLocalIntents::unspool() {
-            if ( intents.size() ) {
-                SimpleMutex::scoped_lock lk(commitJob.groupCommitMutex);
-                _unspool();
-            }
-        }
-        AtomicUInt ThreadLocalIntents::nSpooled;
-    }
-
-    TSP_DECLARE(dur::ThreadLocalIntents,tlIntents)
-    TSP_DEFINE(dur::ThreadLocalIntents,tlIntents)
-
     namespace dur {
 
-        void assertNothingSpooled() {
-#if( CHECK_SPOOLING )
-            if( ThreadLocalIntents::nSpooled != 0 ) {
-                log() << ThreadLocalIntents::nSpooled.get() << endl;
-                if( tlIntents.get() )
-                    log() << "me:" << tlIntents.get()->n_informational() << endl;
-                else
-                    log() << "no tlIntent for my thread" << endl;
-                verify(false);
-            }
-#endif
-        }
-        // when we release our w or W lock this is invoked
-        void unspoolWriteIntents() {
-            ThreadLocalIntents *t = tlIntents.get();
-            if( t )
-                t->unspool();
-        }
-
         /** base declare write intent function that all the helpers call.
            */
         /** we batch up our write intents so that we do not have to synchronize too often */
         void DurableImpl::declareWriteIntent(void *p, unsigned len) {
             dassert( Lock::somethingWriteLocked() );
             MemoryMappedFile::makeWritable(p, len);
-            ThreadLocalIntents *t = tlIntents.getMake();
-            t->push(WriteIntent(p,len));
+            commitJob.note(p, len);
         }
 
         BOOST_STATIC_ASSERT( UncommittedBytesLimit > BSONObjMaxInternalSize * 3 );
@@ -202,7 +107,6 @@ namespace mongo {
             _intentsAndDurOps.clear();
             privateMapBytes += _bytes;
             _bytes = 0;
-            _nSinceCommitIfNeededCall = 0;
         }
 
         CommitJob::CommitJob() :
@@ -211,14 +115,12 @@ namespace mongo {
         {
             _commitNumber = 0;
             _bytes = 0;
-            _nSinceCommitIfNeededCall = 0;
         }
 
         void CommitJob::note(void* p, int len) {
             dassert( Lock::somethingWriteLocked() );
-            groupCommitMutex.dassertLocked();
-
-            dassert( _hasWritten );
+            SimpleMutex::scoped_lock lk(groupCommitMutex);
+            _hasWritten = true;
 
             // from the point of view of the dur module, it would be fine (i think) to only
             // be read locked here. but must be at least read locked to avoid race with
@@ -262,18 +164,7 @@ namespace mongo {
                 lastPos = x;
                 unsigned b = (len+4095) & ~0xfff;
                 _bytes += b;
-#if defined(_DEBUG)
-                _nSinceCommitIfNeededCall++;
-                if( _nSinceCommitIfNeededCall >= 80 ) {
-                    if( _nSinceCommitIfNeededCall % 40 == 0 ) {
-                        log() << "debug nsincecommitifneeded:" << _nSinceCommitIfNeededCall << " bytes:" << _bytes << endl;
-                        if( _nSinceCommitIfNeededCall == 240 || _nSinceCommitIfNeededCall == 1200 ) {
-                            log() << "_DEBUG printing stack given high nsinccommitifneeded number" << endl;
-                            printStackTrace();
-                        }
-                    }
-                }
-#endif
+
                 if (_bytes > UncommittedBytesLimit * 3) {
                     static time_t lastComplain;
                     static unsigned nComplains;
diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.h b/src/mongo/db/storage/mmap_v1/dur_commitjob.h
index 5f32ee46e35..48ac023e68b 100644
--- a/src/mongo/db/storage/mmap_v1/dur_commitjob.h
+++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.h
@@ -115,21 +115,6 @@ namespace mongo {
 #endif
         };
 
-        /** so we don't have to lock the groupCommitMutex too often */
-        class ThreadLocalIntents {
-            enum { N = 21 };
-            std::vector<dur::WriteIntent> intents;
-            bool condense();
-            void _unspool();
-        public:
-            ThreadLocalIntents();
-            ~ThreadLocalIntents();
-            void unspool();
-            void push(const WriteIntent& i);
-            int n_informational() const { return intents.size(); }
-            static AtomicUInt nSpooled;
-        };
-
         /** A commit job object for a group commit. Currently there is one instance of this object.
             concurrency: assumption is caller is appropriately locking.
@@ -140,11 +125,6 @@ namespace mongo {
             void _committingReset();
             ~CommitJob(){ verify(!"shouldn't destroy CommitJob!"); }
 
-            /** record/note an intent to write */
-            void note(void* p, int len);
-            // only called by :
-            friend class ThreadLocalIntents;
-
         public:
             SimpleMutex groupCommitMutex;
             CommitJob();
@@ -152,6 +132,9 @@ namespace mongo {
             /** note an operation other than a "basic write".
                 threadsafe (locks in the impl) */
             void noteOp(shared_ptr<DurOp> p);
 
+            /** record/note an intent to write */
+            void note(void* p, int len);
+
             std::vector< shared_ptr<DurOp> >& ops() {
                 dassert( Lock::isLocked() );          // a rather weak check, we require more than that
                 groupCommitMutex.dassertLocked();     // this is what really makes the below safe
@@ -198,7 +181,6 @@ namespace mongo {
             size_t _bytes;
         public:
             NotifyAll _notify;                    // for getlasterror fsync:true acknowledgements
-            unsigned _nSinceCommitIfNeededCall;   // for asserts and debugging
         };
 
         extern CommitJob& commitJob;
diff --git a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
index 223446b71e6..d5291658254 100644
--- a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
@@ -126,8 +126,6 @@ namespace mongo {
             }
         }
 
-        void assertNothingSpooled();
-
         /** basic write ops / write intents.  note there is no particular order to these : if we have
             two writes to the same location during the group commit interval, it is likely
             (although not assured) that it is journaled here once.
@@ -139,7 +137,6 @@ namespace mongo {
             // switches will be rare as we sort by memory location first and we batch commit.
             RelativePath lastDbPath;
 
-            assertNothingSpooled();
             const vector<WriteIntent>& _intents = commitJob.getIntentsSorted();
 
             // right now the durability code assumes there is at least one write intent
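One consequence visible in the dur_commitjob.cpp hunks above is that the locking responsibility moves from the caller into `CommitJob::note()`: the old code asserted that the caller already held `groupCommitMutex` (the batched unspool path took it once per batch), while the new code acquires the mutex and sets `_hasWritten` inside `note()` itself. Below is a minimal sketch of that caller-locks vs. callee-locks contract, using an assumed `CommitJobSketch` class and `unspoolBatch` helper rather than MongoDB's real `CommitJob`:

```cpp
// Sketch of moving mutex acquisition from the caller into the callee.
// CommitJobSketch and unspoolBatch are assumed names, not MongoDB's API.
#include <mutex>
#include <vector>

struct Intent {
    void* p;
    int   len;
};

class CommitJobSketch {
    std::mutex          groupCommitMutex_;
    std::vector<Intent> intents_;
    bool                hasWritten_ = false;

public:
    // Old contract: the caller held the mutex and had already set the
    // has-written flag; note() only recorded the intent.
    void noteLockedByCaller(void* p, int len) {
        intents_.push_back({p, len});
    }

    // New contract (what this commit switches to): note() is self-contained,
    // so every declared intent pays one lock/unlock on the shared mutex.
    void note(void* p, int len) {
        std::lock_guard<std::mutex> lk(groupCommitMutex_);
        hasWritten_ = true;
        intents_.push_back({p, len});
    }

    std::mutex& mutex() { return groupCommitMutex_; }
};

// Old-style batched caller, mirroring what ThreadLocalIntents::_unspool() did:
// one lock acquisition covers the whole batch (the real _unspool() also set
// the has-written flag at this point).
void unspoolBatch(CommitJobSketch& job, const std::vector<Intent>& batch) {
    std::lock_guard<std::mutex> lk(job.mutex());
    for (const Intent& i : batch)
        job.noteLockedByCaller(i.p, i.len);
}
```

Making `note()` self-contained is what allows the header change above to move it into the public section of `CommitJob` and drop the `friend class ThreadLocalIntents` declaration.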