| author | Eliot Horowitz <eliot@10gen.com> | 2011-05-02 14:39:16 -0400 |
|---|---|---|
| committer | Eliot Horowitz <eliot@10gen.com> | 2011-05-02 14:39:16 -0400 |
| commit | aaad7824cddd8db18263346c1258ad86ce76abec (patch) | |
| tree | 4c9aa03d18494c2b96efcac4b6ac65c4e7f670ae /s | |
| parent | e98a9e6c2d1a4754305c7ba9f09a83fafdacc985 (diff) | |
| download | mongo-aaad7824cddd8db18263346c1258ad86ce76abec.tar.gz | |
put all migrate log messages in a migrate RamLog
Diffstat (limited to 's')
-rw-r--r-- | s/d_migrate.cpp | 107 |
1 file changed, 55 insertions, 52 deletions
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index 998785ade1e..ddc06f50375 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -43,6 +43,7 @@
 #include "../util/queue.h"
 #include "../util/unittest.h"
 #include "../util/processinfo.h"
+#include "../util/ramlog.h"
 
 #include "shard.h"
 #include "d_logic.h"
@@ -53,6 +54,8 @@ using namespace std;
 
 namespace mongo {
 
+    Tee* migrateLog = new RamLog( "migrate" );
+
     class MoveTimingHelper {
     public:
         MoveTimingHelper( const string& where , const string& ns , BSONObj min , BSONObj max , int total )
@@ -72,7 +75,7 @@ namespace mongo {
                 configServer.logChange( (string)"moveChunk." + _where , _ns, _b.obj() );
             }
             catch ( const std::exception& e ) {
-                warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << endl;
+                warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << migrateLog;
             }
         }
@@ -88,7 +91,7 @@ namespace mongo {
             if ( op )
                 op->setMessage( s.c_str() );
             else
-                warning() << "op is null in MoveTimingHelper::done" << endl;
+                warning() << "op is null in MoveTimingHelper::done" << migrateLog;
 
             _b.appendNumber( s , _t.millis() );
             _t.reset();
@@ -98,7 +101,7 @@ namespace mongo {
             ProcessInfo pi;
             ss << " v:" << pi.getVirtualMemorySize() << " r:" << pi.getResidentSize();
-            log() << ss.str() << endl;
+            log() << ss.str() << migrateLog;
 #endif
         }
@@ -156,7 +159,7 @@ namespace mongo {
             writelock lk(ns);
             RemoveSaver rs("moveChunk",ns,"post-cleanup");
             long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
-            log() << "moveChunk deleted: " << num << endl;
+            log() << "moveChunk deleted: " << num << migrateLog;
         }
     };
@@ -167,7 +170,7 @@ namespace mongo {
     void _cleanupOldData( OldDataCleanup cleanup ) {
         Client::initThread( cleanUpThreadName );
-        log() << " (start) waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
+        log() << " (start) waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << migrateLog;
 
         int loops = 0;
         Timer t;
@@ -190,14 +193,14 @@ namespace mongo {
             cleanup.initial = left;
 
             if ( ( loops++ % 200 ) == 0 ) {
-                log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
+                log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << migrateLog;
 
                 stringstream ss;
                 for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
                     CursorId id = *i;
                     ss << id << " ";
                 }
-                log() << " cursors: " << ss.str() << endl;
+                log() << " cursors: " << ss.str() << migrateLog;
             }
         }
@@ -211,10 +214,10 @@ namespace mongo {
             _cleanupOldData( cleanup );
         }
         catch ( std::exception& e ) {
-            log() << " error cleaning old data:" << e.what() << endl;
+            log() << " error cleaning old data:" << e.what() << migrateLog;
         }
         catch ( ... ) {
-            log() << " unknown error cleaning old data" << endl;
+            log() << " unknown error cleaning old data" << migrateLog;
         }
     }
@@ -225,7 +228,7 @@ namespace mongo {
         }
 
         virtual void help( stringstream& help ) const {
-            help << "internal - should not be called directly" << endl;
+            help << "internal - should not be called directly" << migrateLog;
         }
         virtual bool slaveOk() const { return false; }
         virtual bool adminOnly() const { return true; }
@@ -304,7 +307,7 @@ namespace mongo {
                 ide = obj["_id"];
 
             if ( ide.eoo() ) {
-                warning() << "logOpForSharding got mod with no _id, ignoring obj: " << obj << endl;
+                warning() << "logOpForSharding got mod with no _id, ignoring obj: " << obj << migrateLog;
                 return;
             }
@@ -333,7 +336,7 @@ namespace mongo {
             case 'u':
                 if ( ! Helpers::findById( cc() , _ns.c_str() , ide.wrap() , it ) ) {
-                    warning() << "logOpForSharding couldn't find: " << ide << " even though should have" << endl;
+                    warning() << "logOpForSharding couldn't find: " << ide << " even though should have" << migrateLog;
                     return;
                 }
                 break;
@@ -473,14 +476,14 @@ namespace mongo {
                 warning() << "can't move chunk of size (aprox) " << recCount * avgRecSize
                           << " because maximum size allowed to move is " << maxChunkSize
                           << " ns: " << _ns << " " << _min << " -> " << _max
-                          << endl;
+                          << migrateLog;
                 result.appendBool( "chunkTooBig" , true );
                 result.appendNumber( "chunkSize" , (long long)(recCount * avgRecSize) );
                 errmsg = "chunk too big to move";
                 return false;
             }
 
-            log() << "moveChunk number of documents: " << _cloneLocs.size() << endl;
+            log() << "moveChunk number of documents: " << _cloneLocs.size() << migrateLog;
             return true;
         }
@@ -606,7 +609,7 @@ namespace mongo {
     public:
         MoveChunkCommand() : Command( "moveChunk" ) {}
         virtual void help( stringstream& help ) const {
-            help << "should not be calling this directly" << endl;
+            help << "should not be calling this directly" << migrateLog;
         }
 
         virtual bool slaveOk() const { return false; }
@@ -690,7 +693,7 @@ namespace mongo {
             Shard fromShard( from );
             Shard toShard( to );
 
-            log() << "received moveChunk request: " << cmdObj << endl;
+            log() << "received moveChunk request: " << cmdObj << migrateLog;
 
             timing.done(1);
@@ -740,7 +743,7 @@ namespace mongo {
                 result.append( "requestedMax" , max );
 
                 warning() << "aborted moveChunk because" << errmsg << ": " << min << "->" << max
-                          << " is now " << currMin << "->" << currMax << endl;
+                          << " is now " << currMin << "->" << currMax << migrateLog;
                 return false;
             }
@@ -750,7 +753,7 @@ namespace mongo {
                 result.append( "official" , myOldShard );
 
                 warning() << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
-                          << " and not at " << fromShard.getName() << endl;
+                          << " and not at " << fromShard.getName() << migrateLog;
                 return false;
             }
@@ -760,7 +763,7 @@ namespace mongo {
                 result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );
 
                 warning() << "aborted moveChunk because " << errmsg << ": official " << maxVersion
-                          << " mine: " << shardingState.getVersion(ns) << endl;
+                          << " mine: " << shardingState.getVersion(ns) << migrateLog;
                 return false;
             }
@@ -769,7 +772,7 @@ namespace mongo {
                 ShardChunkVersion shardVersion;
                 shardingState.trySetVersion( ns , shardVersion /* will return updated */ );
 
-                log() << "moveChunk request accepted at version " << shardVersion << endl;
+                log() << "moveChunk request accepted at version " << shardVersion << migrateLog;
             }
 
             timing.done(2);
@@ -814,10 +817,10 @@ namespace mongo {
                 res = res.getOwned();
                 conn.done();
 
-                log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << endl;
+                log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;
 
                 if ( ! ok || res["state"].String() == "fail" ) {
-                    warning() << "moveChunk error transfering data caused migration abort: " << res << endl;
+                    warning() << "moveChunk error transfering data caused migration abort: " << res << migrateLog;
                     errmsg = "data transfer error";
                     result.append( "cause" , res );
                     return false;
                 }
@@ -834,7 +837,7 @@ namespace mongo {
                     conn->runCommand( "admin" , BSON( "_recvChunkAbort" << 1 ) , res );
                     res = res.getOwned();
                     conn.done();
-                    error() << "aborting migrate because too much memory used res: " << res << endl;
+                    error() << "aborting migrate because too much memory used res: " << res << migrateLog;
                     errmsg = "aborting migrate because too much memory used";
                     result.appendBool( "split" , true );
                    return false;
@@ -862,7 +865,7 @@ namespace mongo {
                     shardingState.donateChunk( ns , min , max , myVersion );
                 }
 
-                log() << "moveChunk setting version to: " << myVersion << endl;
+                log() << "moveChunk setting version to: " << myVersion << migrateLog;
 
                 // 5.b
                 // we're under the collection lock here, too, so we can undo the chunk donation because no other state change
@@ -884,14 +887,14 @@ namespace mongo {
                     }
 
                     log() << "moveChunk migrate commit not accepted by TO-shard: " << res
-                          << " resetting shard version to: " << currVersion << endl;
+                          << " resetting shard version to: " << currVersion << migrateLog;
                     errmsg = "_recvChunkCommit failed!";
                     result.append( "cause" , res );
                     return false;
                 }
 
-                log() << "moveChunk migrate commit accepted by TO-shard: " << res << endl;
+                log() << "moveChunk migrate commit accepted by TO-shard: " << res << migrateLog;
             }
 
             // 5.c
@@ -972,12 +975,12 @@ namespace mongo {
                     updates.append( op.obj() );
 
                     log() << "moveChunk updating self version to: " << nextVersion << " through "
-                          << bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << endl;
+                          << bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << migrateLog;
                 }
                 else {
-                    log() << "moveChunk moved last chunk out for collection '" << ns << "'" << endl;
+                    log() << "moveChunk moved last chunk out for collection '" << ns << "'" << migrateLog;
                 }
 
                 updates.done();
@@ -998,7 +1001,7 @@ namespace mongo {
                 preCond.done();
 
                 BSONObj cmd = cmdBuilder.obj();
-                log(7) << "moveChunk update: " << cmd << endl;
+                log(7) << "moveChunk update: " << cmd << migrateLog;
 
                 bool ok = false;
                 BSONObj cmdResult;
@@ -1008,7 +1011,7 @@ namespace mongo {
                     conn.done();
                 }
                 catch ( DBException& e ) {
-                    warning() << e << endl;
+                    warning() << e << migrateLog;
                     ok = false;
                     BSONObjBuilder b;
                     e.getInfo().append( b );
@@ -1024,7 +1027,7 @@ namespace mongo {
                     // if the commit did not make it, currently the only way to fix this state is to bounce the mongod so
                     // that the old state (before migrating) be brought in
-                    warning() << "moveChunk commit outcome ongoing: " << cmd << " for command :" << cmdResult << endl;
+                    warning() << "moveChunk commit outcome ongoing: " << cmd << " for command :" << cmdResult << migrateLog;
                     sleepsecs( 10 );
 
                     try {
@@ -1036,13 +1039,13 @@ namespace mongo {
                        ShardChunkVersion checkVersion = doc["lastmod"];
 
                        if ( checkVersion == nextVersion ) {
-                           log() << "moveChunk commit confirmed" << endl;
+                           log() << "moveChunk commit confirmed" << migrateLog;
                        }
                        else {
                            error() << "moveChunk commit failed: version is at"
-                                   << checkVersion << " instead of " << nextVersion << endl;
-                           error() << "TERMINATING" << endl;
+                                   << checkVersion << " instead of " << nextVersion << migrateLog;
+                           error() << "TERMINATING" << migrateLog;
                            dbexit( EXIT_SHARDING_ERROR );
                        }
@@ -1050,8 +1053,8 @@ namespace mongo {
                    }
                    catch ( ... ) {
-                       error() << "moveChunk failed to get confirmation of commit" << endl;
-                       error() << "TERMINATING" << endl;
+                       error() << "moveChunk failed to get confirmation of commit" << migrateLog;
+                       error() << "TERMINATING" << migrateLog;
                        dbexit( EXIT_SHARDING_ERROR );
                    }
                }
@@ -1073,11 +1076,11 @@ namespace mongo {
                 c.max = max.getOwned();
                 ClientCursor::find( ns , c.initial );
                 if ( c.initial.size() ) {
-                    log() << "forking for cleaning up chunk data" << endl;
+                    log() << "forking for cleaning up chunk data" << migrateLog;
                     boost::thread t( boost::bind( &cleanupOldData , c ) );
                 }
                 else {
-                    log() << "doing delete inline" << endl;
+                    log() << "doing delete inline" << migrateLog;
                     // 7.
                     c.doRemove();
                 }
@@ -1136,12 +1139,12 @@ namespace mongo {
             catch ( std::exception& e ) {
                 state = FAIL;
                 errmsg = e.what();
-                error() << "migrate failed: " << e.what() << endl;
+                error() << "migrate failed: " << e.what() << migrateLog;
             }
             catch ( ... ) {
                 state = FAIL;
                 errmsg = "UNKNOWN ERROR";
-                error() << "migrate failed with unknown exception" << endl;
+                error() << "migrate failed with unknown exception" << migrateLog;
             }
             setActive( false );
         }
@@ -1185,7 +1188,7 @@ namespace mongo {
                 RemoveSaver rs( "moveChunk" , ns , "preCleanup" );
                 long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
                 if ( num )
-                    warning() << "moveChunkCmd deleted data already in chunk # objects: " << num << endl;
+                    warning() << "moveChunkCmd deleted data already in chunk # objects: " << num << migrateLog;
 
                 timing.done(2);
             }
@@ -1201,7 +1204,7 @@ namespace mongo {
                         state = FAIL;
                         errmsg = "_migrateClone failed: ";
                         errmsg += res.toString();
-                        error() << errmsg << endl;
+                        error() << errmsg << migrateLog;
                         conn.done();
                         return;
                     }
@@ -1240,7 +1243,7 @@ namespace mongo {
                         state = FAIL;
                         errmsg = "_transferMods failed: ";
                         errmsg += res.toString();
-                        error() << "_transferMods failed: " << res << endl;
+                        error() << "_transferMods failed: " << res << migrateLog;
                         conn.done();
                         return;
                     }
@@ -1261,7 +1264,7 @@ namespace mongo {
                         break;
 
                     if ( i > 100 ) {
-                        warning() << "secondaries having hard time keeping up with migrate" << endl;
+                        warning() << "secondaries having hard time keeping up with migrate" << migrateLog;
                     }
 
                     sleepmillis( 20 );
                 }
@@ -1269,7 +1272,7 @@ namespace mongo {
                 if ( i == maxIterations ) {
                     errmsg = "secondary can't keep up with migrate";
-                    error() << errmsg << endl;
+                    error() << errmsg << migrateLog;
                     conn.done();
                     state = FAIL;
                     return;
                 }
@@ -1287,7 +1290,7 @@ namespace mongo {
             while ( state == STEADY || state == COMMIT_START ) {
                 BSONObj res;
                 if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ) {
-                    log() << "_transferMods failed in STEADY state: " << res << endl;
+                    log() << "_transferMods failed in STEADY state: " << res << migrateLog;
                     errmsg = res.toString();
                     state = FAIL;
                     conn.done();
@@ -1366,7 +1369,7 @@ namespace mongo {
                     BSONObj fullObj;
                     if ( Helpers::findById( cc() , ns.c_str() , id, fullObj ) ) {
                         if ( ! isInRange( fullObj , min , max ) ) {
-                            log() << "not applying out of range deletion: " << fullObj << endl;
+                            log() << "not applying out of range deletion: " << fullObj << migrateLog;
                             continue;
                         }
@@ -1407,17 +1410,17 @@ namespace mongo {
         bool flushPendingWrites( const ReplTime& lastOpApplied ) {
             if ( ! opReplicatedEnough( lastOpApplied ) ) {
                 warning() << "migrate commit attempt timed out contacting " << slaveCount
-                          << " slaves for '" << ns << "' " << min << " -> " << max << endl;
+                          << " slaves for '" << ns << "' " << min << " -> " << max << migrateLog;
                 return false;
             }
-            log() << "migrate commit succeeded flushing to secondaries for '" << ns << "' " << min << " -> " << max << endl;
+            log() << "migrate commit succeeded flushing to secondaries for '" << ns << "' " << min << " -> " << max << migrateLog;
 
             {
                 readlock lk(ns); // commitNow() currently requires it
 
                 // if durability is on, force a write to journal
                 if ( getDur().commitNow() ) {
-                    log() << "migrate commit flushed to journal for '" << ns << "' " << min << " -> " << max << endl;
+                    log() << "migrate commit flushed to journal for '" << ns << "' " << min << " -> " << max << migrateLog;
                 }
             }
@@ -1450,7 +1453,7 @@ namespace mongo {
                 if ( state == DONE )
                     return true;
             }
-            log() << "startCommit never finished!" << endl;
+            log() << "startCommit never finished!" << migrateLog;
             return false;
         }
@@ -1577,7 +1580,7 @@ namespace mongo {
             assert( ! isInRange( BSON( "x" << 5 ) , min , max ) );
             assert( ! isInRange( BSON( "x" << 6 ) , min , max ) );
 
-            log(1) << "isInRangeTest passed" << endl;
+            log(1) << "isInRangeTest passed" << migrateLog;
         }
     } isInRangeTest;
 }
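For readers unfamiliar with the logging utilities involved: a `RamLog` is a `Tee`, a sink that keeps recent log lines in a bounded in-memory buffer, and streaming a `Tee*` into a log stream ends the line the way `endl` did while also copying it into that buffer. That is why the change above can simply swap `endl` for `migrateLog`: every migration-related message still reaches the normal server log, and a copy of the most recent ones stays in memory where it can be surfaced for diagnostics without scanning the full log. The sketch below illustrates the mechanism only; the class names, signatures, and buffer size are simplified stand-ins for this page, not MongoDB's actual `util/log.h` / `util/ramlog.h` interfaces.

```cpp
// Minimal, self-contained sketch of the "tee into an in-memory ring buffer"
// pattern this commit applies. All names here (Tee, RamLog, LogStream, log())
// are simplified stand-ins for illustration, NOT MongoDB's actual
// util/log.h / util/ramlog.h interfaces.
#include <deque>
#include <iostream>
#include <sstream>
#include <string>

// A sink that receives finished log lines.
class Tee {
public:
    virtual ~Tee() {}
    virtual void write(const std::string& line) = 0;
};

// Keeps the most recent lines in memory so they can be shown later
// (e.g. on a diagnostics page) without scanning the full log file.
class RamLog : public Tee {
public:
    explicit RamLog(const std::string& name, size_t maxLines = 1024)
        : _name(name), _maxLines(maxLines) {}

    void write(const std::string& line) {
        if (_lines.size() >= _maxLines)
            _lines.pop_front();          // drop the oldest line
        _lines.push_back(line);
    }

    const std::string& name() const { return _name; }
    const std::deque<std::string>& lines() const { return _lines; }

private:
    std::string _name;
    size_t _maxLines;
    std::deque<std::string> _lines;
};

// A tiny log stream: streaming a Tee* terminates the line (as endl would)
// and additionally copies it into the tee's buffer.
class LogStream {
public:
    template <typename T>
    LogStream& operator<<(const T& v) {
        _buf << v;
        return *this;
    }

    LogStream& operator<<(Tee* tee) {
        std::string line = _buf.str();
        std::cout << line << std::endl;  // normal log output
        if (tee)
            tee->write(line);            // plus the in-memory copy
        _buf.str("");
        return *this;
    }

private:
    std::ostringstream _buf;
};

LogStream& log() {
    static LogStream ls;
    return ls;
}

// Mirrors the declaration added at the top of d_migrate.cpp.
static Tee* migrateLog = new RamLog("migrate");

int main() {
    // Mirrors the edited call sites: migrateLog replaces endl as the terminator.
    log() << "moveChunk deleted: " << 42 << migrateLog;
    return 0;
}
```

In this setup the `Tee*` doubles as the line terminator, which is why the diff swaps out `endl` rather than adding `migrateLog` alongside it, and keeping the buffer bounded means the in-memory copy stays cheap even for long, chatty migrations.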