Diffstat (limited to 'src')
-rw-r--r--  src/mongo/client/dbclient.cpp | 14
-rw-r--r--  src/mongo/client/dbclient_rs.cpp | 20
-rw-r--r--  src/mongo/client/distlock.cpp | 68
-rw-r--r--  src/mongo/client/distlock_test.cpp | 6
-rw-r--r--  src/mongo/client/model.cpp | 4
-rw-r--r--  src/mongo/client/parallel.cpp | 34
-rw-r--r--  src/mongo/db/btree.cpp | 4
-rw-r--r--  src/mongo/db/btreebuilder.cpp | 6
-rw-r--r--  src/mongo/db/cloner.cpp | 8
-rw-r--r--  src/mongo/db/commands/isself.cpp | 12
-rw-r--r--  src/mongo/db/commands/mr.cpp | 12
-rw-r--r--  src/mongo/db/db.cpp | 8
-rw-r--r--  src/mongo/db/dbcommands.cpp | 10
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 2
-rw-r--r--  src/mongo/db/dur.cpp | 2
-rw-r--r--  src/mongo/db/dur_journal.cpp | 2
-rw-r--r--  src/mongo/db/durop.cpp | 2
-rw-r--r--  src/mongo/db/extsort.cpp | 8
-rw-r--r--  src/mongo/db/geo/haystack.cpp | 2
-rw-r--r--  src/mongo/db/index.cpp | 6
-rw-r--r--  src/mongo/db/index_update.cpp | 12
-rw-r--r--  src/mongo/db/instance.cpp | 10
-rw-r--r--  src/mongo/db/oplog.cpp | 2
-rw-r--r--  src/mongo/db/pdfile.cpp | 20
-rw-r--r--  src/mongo/db/projection.cpp | 6
-rw-r--r--  src/mongo/db/queryoptimizer.cpp | 2
-rw-r--r--  src/mongo/db/repl.cpp | 28
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 2
-rw-r--r--  src/mongo/db/repl/consensus.cpp | 2
-rw-r--r--  src/mongo/db/repl/manager.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs.cpp | 8
-rw-r--r--  src/mongo/db/repl/rs_config.cpp | 4
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 2
-rw-r--r--  src/mongo/db/repl_block.cpp | 2
-rw-r--r--  src/mongo/db/security_commands.cpp | 4
-rw-r--r--  src/mongo/db/security_common.cpp | 2
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 44
-rw-r--r--  src/mongo/s/chunk.cpp | 4
-rw-r--r--  src/mongo/s/config.cpp | 8
-rw-r--r--  src/mongo/s/cursors.cpp | 8
-rw-r--r--  src/mongo/s/d_migrate.cpp | 2
-rw-r--r--  src/mongo/s/d_split.cpp | 6
-rw-r--r--  src/mongo/s/s_only.cpp | 2
-rw-r--r--  src/mongo/s/server.cpp | 2
-rw-r--r--  src/mongo/s/strategy_shard.cpp | 2
-rw-r--r--  src/mongo/s/strategy_single.cpp | 2
-rw-r--r--  src/mongo/scripting/engine.cpp | 2
-rw-r--r--  src/mongo/scripting/engine_v8.cpp | 2
-rw-r--r--  src/mongo/tools/dump.cpp | 4
-rw-r--r--  src/mongo/tools/import.cpp | 6
-rw-r--r--  src/mongo/tools/restore.cpp | 4
-rw-r--r--  src/mongo/tools/tool.cpp | 2
-rw-r--r--  src/mongo/unittest/unittest.cpp | 6
-rw-r--r--  src/mongo/util/background.cpp | 4
-rw-r--r--  src/mongo/util/file_allocator.cpp | 2
-rw-r--r--  src/mongo/util/net/listen.cpp | 6
-rw-r--r--  src/mongo/util/net/message_port.cpp | 6
-rw-r--r--  src/mongo/util/net/miniwebserver.cpp | 2
-rw-r--r--  src/mongo/util/net/sock.cpp | 18
-rw-r--r--  src/mongo/util/paths.h | 2
-rw-r--r--  src/mongo/util/version.cpp | 4
61 files changed, 249 insertions, 239 deletions
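
The hunks below mechanically replace calls to the lowercase log(level) stream helper with the uppercase LOG(level) macro throughout src/. The usual point of this kind of change is that a macro can check the verbosity level before the stream arguments are evaluated, while a plain function call always evaluates them first. The following is a minimal sketch of that distinction, using hypothetical names (logSketch, LOG_SKETCH, and a bare logLevel global); it is not MongoDB's actual logging implementation.

    #include <iostream>
    #include <sstream>

    static int logLevel = 0;   // assumed global verbosity knob, standing in for the one used in the diff

    // Function-style logger: the returned stream decides whether output is kept,
    // but every operator<< argument is still evaluated by the caller.
    inline std::ostream& logSketch(int level) {
        static std::ostringstream discard;  // never printed; acts as a throwaway sink
        return (logLevel >= level) ? std::cout : static_cast<std::ostream&>(discard);
    }

    // Macro-style logger: the level check guards the whole statement, so the
    // stream expression (and any expensive arguments) is skipped when filtered out.
    #define LOG_SKETCH(level) \
        if (logLevel < (level)) { /* skipped entirely */ } else logSketch(level)

    int main() {
        logSketch(2)   << "always evaluated, possibly discarded" << std::endl;
        LOG_SKETCH(2)  << "evaluated only when logLevel >= 2" << std::endl;
        return 0;
    }
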
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index e9500dd539d..abedd8e2101 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -94,12 +94,12 @@ namespace mongo {
case MASTER: {
DBClientConnection * c = new DBClientConnection(true);
c->setSoTimeout( socketTimeout );
- log(1) << "creating new connection to:" << _servers[0] << endl;
+ LOG(1) << "creating new connection to:" << _servers[0] << endl;
if ( ! c->connect( _servers[0] , errmsg ) ) {
delete c;
return 0;
}
- log(1) << "connected connection!" << endl;
+ LOG(1) << "connected connection!" << endl;
return c;
}
@@ -769,22 +769,22 @@ namespace mongo {
throw SocketException( SocketException::FAILED_STATE , toString() );
lastReconnectTry = time(0);
- log(_logLevel) << "trying reconnect to " << _serverString << endl;
+ LOG(_logLevel) << "trying reconnect to " << _serverString << endl;
string errmsg;
_failed = false;
if ( ! _connect(errmsg) ) {
_failed = true;
- log(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
+ LOG(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
throw SocketException( SocketException::CONNECT_ERROR , toString() );
}
- log(_logLevel) << "reconnect " << _serverString << " ok" << endl;
+ LOG(_logLevel) << "reconnect " << _serverString << " ok" << endl;
for( map< string, pair<string,string> >::iterator i = authCache.begin(); i != authCache.end(); i++ ) {
const char *dbname = i->first.c_str();
const char *username = i->second.first.c_str();
const char *password = i->second.second.c_str();
if( !DBClientBase::auth(dbname, username, password, errmsg, false) )
- log(_logLevel) << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
+ LOG(_logLevel) << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
}
}
@@ -1015,7 +1015,7 @@ namespace mongo {
if ( ! runCommand( nsToDatabase( ns.c_str() ) ,
BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << indexName ) ,
info ) ) {
- log(_logLevel) << "dropIndex failed: " << info << endl;
+ LOG(_logLevel) << "dropIndex failed: " << info << endl;
uassert( 10007 , "dropIndex failed" , 0 );
}
resetIndexCache();
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index 43e21a3001b..8e34bc55404 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -74,7 +74,7 @@ namespace mongo {
const ReplicaSetMonitor::Node& node = nodes[nextNodeIndex];
if (!node.ok) {
- log(2) << "dbclient_rs not selecting " << node << ", not currently ok" << endl;
+ LOG(2) << "dbclient_rs not selecting " << node << ", not currently ok" << endl;
continue;
}
@@ -88,7 +88,7 @@ namespace mongo {
if (node.isLocalSecondary(localThresholdMillis)) {
// found a local node. return early.
- log(2) << "dbclient_rs getSlave found local secondary for queries: "
+ LOG(2) << "dbclient_rs getSlave found local secondary for queries: "
<< nextNodeIndex << ", ping time: " << node.pingTimeMillis << endl;
*lastHost = fallbackHost;
return fallbackHost;
@@ -282,7 +282,7 @@ namespace mongo {
if ( createFromSeed ) {
map<string,vector<HostAndPort> >::const_iterator j = _seedServers.find( name );
if ( j != _seedServers.end() ) {
- log(4) << "Creating ReplicaSetMonitor from cached address" << endl;
+ LOG(4) << "Creating ReplicaSetMonitor from cached address" << endl;
ReplicaSetMonitorPtr& m = _sets[name];
verify( !m );
m.reset( new ReplicaSetMonitor( name, j->second ) );
@@ -334,7 +334,7 @@ namespace mongo {
}
void ReplicaSetMonitor::_remove_inlock( const string& name, bool clearSeedCache ) {
- log(2) << "Removing ReplicaSetMonitor for " << name << " from replica set table" << endl;
+ LOG(2) << "Removing ReplicaSetMonitor for " << name << " from replica set table" << endl;
_sets.erase( name );
if ( clearSeedCache ) {
_seedServers.erase( name );
@@ -459,21 +459,21 @@ namespace mongo {
return fallbackNode;
else if ( _nodes[ _nextSlave ].isLocalSecondary( _localThresholdMillis ) ) {
// found a local slave. return early.
- log(2) << "dbclient_rs getSlave found local secondary for queries: "
+ LOG(2) << "dbclient_rs getSlave found local secondary for queries: "
<< _nextSlave << ", ping time: "
<< _nodes[ _nextSlave ].pingTimeMillis << endl;
return fallbackNode;
}
}
else
- log(2) << "dbclient_rs getSlave not selecting " << _nodes[_nextSlave]
+ LOG(2) << "dbclient_rs getSlave not selecting " << _nodes[_nextSlave]
<< ", not currently okForSecondaryQueries" << endl;
}
}
if ( ! fallbackNode.empty() ) {
// use a non-local secondary, even if local was preferred
- log(1) << "dbclient_rs getSlave falling back to a non-local secondary node" << endl;
+ LOG(1) << "dbclient_rs getSlave falling back to a non-local secondary node" << endl;
return fallbackNode;
}
@@ -482,7 +482,7 @@ namespace mongo {
_master < static_cast<int>(_nodes.size()) && _nodes[_master].ok);
// Fall back to primary
- log(1) << "dbclient_rs getSlave no member in secondary state found, "
+ LOG(1) << "dbclient_rs getSlave no member in secondary state found, "
"returning primary " << _nodes[ _master ] << endl;
return _nodes[_master].addr;
}
@@ -734,7 +734,7 @@ namespace mongo {
node.lastIsMaster = o.copy();
}
- log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << conn->toString()
+ LOG( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << conn->toString()
<< ' ' << o << endl;
// add other nodes
@@ -757,7 +757,7 @@ namespace mongo {
}
catch ( std::exception& e ) {
- log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception "
+ LOG( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception "
<< conn->toString() << ' ' << e.what() << endl;
errorOccured = true;
diff --git a/src/mongo/client/distlock.cpp b/src/mongo/client/distlock.cpp
index 5413cbc6e10..5cfc203b07f 100644
--- a/src/mongo/client/distlock.cpp
+++ b/src/mongo/client/distlock.cpp
@@ -87,14 +87,14 @@ namespace mongo {
string pingId = pingThreadId( addr, process );
- log( DistributedLock::logLvl - 1 ) << "creating distributed lock ping thread for " << addr
+ LOG( DistributedLock::logLvl - 1 ) << "creating distributed lock ping thread for " << addr
<< " and process " << process
<< " (sleeping for " << sleepTime << "ms)" << endl;
static int loops = 0;
while( ! inShutdown() && ! shouldKill( addr, process ) ) {
- log( DistributedLock::logLvl + 2 ) << "distributed lock pinger '" << pingId << "' about to ping." << endl;
+ LOG( DistributedLock::logLvl + 2 ) << "distributed lock pinger '" << pingId << "' about to ping." << endl;
Date_t pingTime;
@@ -157,7 +157,7 @@ namespace mongo {
conn->ensureIndex( DistributedLock::lockPingNS , BSON( "ping" << 1 ) );
}
- log( DistributedLock::logLvl - ( loops % 10 == 0 ? 1 : 0 ) ) << "cluster " << addr << " pinged successfully at " << pingTime
+ LOG( DistributedLock::logLvl - ( loops % 10 == 0 ? 1 : 0 ) ) << "cluster " << addr << " pinged successfully at " << pingTime
<< " by distributed lock pinger '" << pingId
<< "', sleeping for " << sleepTime << "ms" << endl;
@@ -167,7 +167,7 @@ namespace mongo {
int numOldLocks = _oldLockOIDs.size();
if( numOldLocks > 0 )
- log( DistributedLock::logLvl - 1 ) << "trying to delete " << _oldLockOIDs.size() << " old lock entries for process " << process << endl;
+ LOG( DistributedLock::logLvl - 1 ) << "trying to delete " << _oldLockOIDs.size() << " old lock entries for process " << process << endl;
bool removed = false;
for( list<OID>::iterator i = _oldLockOIDs.begin(); i != _oldLockOIDs.end();
@@ -181,11 +181,11 @@ namespace mongo {
// Either the update went through or it didn't, either way we're done trying to
// unlock
- log( DistributedLock::logLvl - 1 ) << "handled late remove of old distributed lock with ts " << *i << endl;
+ LOG( DistributedLock::logLvl - 1 ) << "handled late remove of old distributed lock with ts " << *i << endl;
removed = true;
}
catch( UpdateNotTheSame& ) {
- log( DistributedLock::logLvl - 1 ) << "partially removed old distributed lock with ts " << *i << endl;
+ LOG( DistributedLock::logLvl - 1 ) << "partially removed old distributed lock with ts " << *i << endl;
removed = true;
}
catch ( std::exception& e) {
@@ -196,7 +196,7 @@ namespace mongo {
}
if( numOldLocks > 0 && _oldLockOIDs.size() > 0 ){
- log( DistributedLock::logLvl - 1 ) << "not all old lock entries could be removed for process " << process << endl;
+ LOG( DistributedLock::logLvl - 1 ) << "not all old lock entries could be removed for process " << process << endl;
}
conn.done();
@@ -324,7 +324,7 @@ namespace mongo {
_lockTimeout( lockTimeout == 0 ? LOCK_TIMEOUT : lockTimeout ), _maxClockSkew( _lockTimeout / LOCK_SKEW_FACTOR ), _maxNetSkew( _maxClockSkew ), _lockPing( _maxClockSkew ),
_mutex( "DistributedLock" )
{
- log( logLvl - 1 ) << "created new distributed lock for " << name << " on " << conn
+ LOG( logLvl - 1 ) << "created new distributed lock for " << name << " on " << conn
<< " ( lock timeout : " << _lockTimeout
<< ", ping interval : " << _lockPing << ", process : " << asProcess << " )" << endl;
@@ -432,7 +432,7 @@ namespace mongo {
// Skew is how much time we'd have to add to local to get to remote
avgSkews[s] += (long long) (remote - local);
- log( logLvl + 1 ) << "skew from remote server " << server << " found: " << (long long) (remote - local) << endl;
+ LOG( logLvl + 1 ) << "skew from remote server " << server << " found: " << (long long) (remote - local) << endl;
}
}
@@ -464,11 +464,11 @@ namespace mongo {
// Make sure our max skew is not more than our pre-set limit
if(totalSkew > (long long) maxClockSkew) {
- log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is out of " << maxClockSkew << "ms bounds." << endl;
+ LOG( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is out of " << maxClockSkew << "ms bounds." << endl;
return false;
}
- log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is in " << maxClockSkew << "ms bounds." << endl;
+ LOG( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is in " << maxClockSkew << "ms bounds." << endl;
return true;
}
@@ -517,7 +517,7 @@ namespace mongo {
// Case 1: No locks
if ( o.isEmpty() ) {
try {
- log( logLvl ) << "inserting initial doc in " << locksNS << " for lock " << _name << endl;
+ LOG( logLvl ) << "inserting initial doc in " << locksNS << " for lock " << _name << endl;
conn->insert( locksNS , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
}
catch ( UserException& e ) {
@@ -532,10 +532,10 @@ namespace mongo {
bool canReenter = reenter && o["process"].String() == _processId && ! distLockPinger.willUnlockOID( o["ts"].OID() ) && o["state"].numberInt() == 2;
if( reenter && ! canReenter ) {
- log( logLvl - 1 ) << "not re-entering distributed lock " << lockName;
- if( o["process"].String() != _processId ) log( logLvl - 1 ) << ", different process " << _processId << endl;
- else if( o["state"].numberInt() == 2 ) log( logLvl - 1 ) << ", state not finalized" << endl;
- else log( logLvl - 1 ) << ", ts " << o["ts"].OID() << " scheduled for late unlock" << endl;
+ LOG( logLvl - 1 ) << "not re-entering distributed lock " << lockName;
+ if( o["process"].String() != _processId ) LOG( logLvl - 1 ) << ", different process " << _processId << endl;
+ else if( o["state"].numberInt() == 2 ) LOG( logLvl - 1 ) << ", state not finalized" << endl;
+ else LOG( logLvl - 1 ) << ", ts " << o["ts"].OID() << " scheduled for late unlock" << endl;
// reset since we've been bounced by a previous lock not being where we thought it was,
// and should go through full forcing process if required.
@@ -546,7 +546,7 @@ namespace mongo {
BSONObj lastPing = conn->findOne( lockPingNS , o["process"].wrap( "_id" ) );
if ( lastPing.isEmpty() ) {
- log( logLvl ) << "empty ping found for process in lock '" << lockName << "'" << endl;
+ LOG( logLvl ) << "empty ping found for process in lock '" << lockName << "'" << endl;
// TODO: Using 0 as a "no time found" value Will fail if dates roll over, but then, so will a lot.
lastPing = BSON( "_id" << o["process"].String() << "ping" << (Date_t) 0 );
}
@@ -555,7 +555,7 @@ namespace mongo {
unsigned long long takeover = _lockTimeout;
PingData _lastPingCheck = getLastPing();
- log( logLvl ) << "checking last ping for lock '" << lockName << "'" << " against process " << _lastPingCheck.id << " and ping " << _lastPingCheck.lastPing << endl;
+ LOG( logLvl ) << "checking last ping for lock '" << lockName << "'" << " against process " << _lastPingCheck.id << " and ping " << _lastPingCheck.lastPing << endl;
try {
@@ -592,17 +592,17 @@ namespace mongo {
}
if ( elapsed <= takeover && ! canReenter ) {
- log( logLvl ) << "could not force lock '" << lockName << "' because elapsed time " << elapsed << " <= takeover time " << takeover << endl;
+ LOG( logLvl ) << "could not force lock '" << lockName << "' because elapsed time " << elapsed << " <= takeover time " << takeover << endl;
*other = o; other->getOwned(); conn.done();
return false;
}
else if( elapsed > takeover && canReenter ) {
- log( logLvl - 1 ) << "not re-entering distributed lock " << lockName << "' because elapsed time " << elapsed << " > takeover time " << takeover << endl;
+ LOG( logLvl - 1 ) << "not re-entering distributed lock " << lockName << "' because elapsed time " << elapsed << " > takeover time " << takeover << endl;
*other = o; other->getOwned(); conn.done();
return false;
}
- log( logLvl - 1 ) << ( canReenter ? "re-entering" : "forcing" ) << " lock '" << lockName << "' because "
+ LOG( logLvl - 1 ) << ( canReenter ? "re-entering" : "forcing" ) << " lock '" << lockName << "' because "
<< ( canReenter ? "re-entering is allowed, " : "" )
<< "elapsed time " << elapsed << " > takeover time " << takeover << endl;
@@ -631,7 +631,7 @@ namespace mongo {
// TODO: Clean up all the extra code to exit this method, probably with a refactor
if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
- ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not force lock '" << lockName << "' "
+ ( errMsg.empty() ? LOG( logLvl - 1 ) : warning() ) << "Could not force lock '" << lockName << "' "
<< ( !errMsg.empty() ? causedBy(errMsg) : string("(another force won)") ) << endl;
*other = o; other->getOwned(); conn.done();
return false;
@@ -673,7 +673,7 @@ namespace mongo {
// TODO: Clean up all the extra code to exit this method, probably with a refactor
if ( ! errMsg.empty() || ! err["n"].type() || err["n"].numberInt() < 1 ) {
- ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not re-enter lock '" << lockName << "' "
+ ( errMsg.empty() ? LOG( logLvl - 1 ) : warning() ) << "Could not re-enter lock '" << lockName << "' "
<< ( !errMsg.empty() ? causedBy(errMsg) : string("(not sure lock is held)") )
<< " gle: " << err
<< endl;
@@ -694,14 +694,14 @@ namespace mongo {
<< lockName << causedBy( e ), 13660);
}
- log( logLvl - 1 ) << "re-entered distributed lock '" << lockName << "'" << endl;
+ LOG( logLvl - 1 ) << "re-entered distributed lock '" << lockName << "'" << endl;
*other = o.getOwned();
conn.done();
return true;
}
- log( logLvl - 1 ) << "lock '" << lockName << "' successfully forced" << endl;
+ LOG( logLvl - 1 ) << "lock '" << lockName << "' successfully forced" << endl;
// We don't need the ts value in the query, since we will only ever replace locks with state=0.
}
@@ -730,7 +730,7 @@ namespace mongo {
// Main codepath to acquire lock
- log( logLvl ) << "about to acquire distributed lock '" << lockName << ":\n"
+ LOG( logLvl ) << "about to acquire distributed lock '" << lockName << ":\n"
<< lockDetails.jsonString(Strict, true) << "\n"
<< query.jsonString(Strict, true) << endl;
@@ -742,7 +742,7 @@ namespace mongo {
currLock = conn->findOne( locksNS , _id );
if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
- ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "could not acquire lock '" << lockName << "' "
+ ( errMsg.empty() ? LOG( logLvl - 1 ) : warning() ) << "could not acquire lock '" << lockName << "' "
<< ( !errMsg.empty() ? causedBy( errMsg ) : string("(another update won)") ) << endl;
*other = currLock;
other->getOwned();
@@ -821,11 +821,11 @@ namespace mongo {
// Locks on all servers are now set and safe until forcing
if ( currLock["ts"] == lockDetails["ts"] ) {
- log( logLvl - 1 ) << "lock update won, completing lock propagation for '" << lockName << "'" << endl;
+ LOG( logLvl - 1 ) << "lock update won, completing lock propagation for '" << lockName << "'" << endl;
gotLock = true;
}
else {
- log( logLvl - 1 ) << "lock update lost, lock '" << lockName << "' not propagated." << endl;
+ LOG( logLvl - 1 ) << "lock update lost, lock '" << lockName << "' not propagated." << endl;
// Register the lock for deletion, to speed up failover
// Not strictly necessary, but helpful
@@ -894,9 +894,9 @@ namespace mongo {
// Log our lock results
if(gotLock)
- log( logLvl - 1 ) << "distributed lock '" << lockName << "' acquired, ts : " << currLock["ts"].OID() << endl;
+ LOG( logLvl - 1 ) << "distributed lock '" << lockName << "' acquired, ts : " << currLock["ts"].OID() << endl;
else
- log( logLvl - 1 ) << "distributed lock '" << lockName << "' was not acquired." << endl;
+ LOG( logLvl - 1 ) << "distributed lock '" << lockName << "' was not acquired." << endl;
conn.done();
@@ -951,12 +951,12 @@ namespace mongo {
continue;
}
- log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked. " << endl;
+ LOG( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked. " << endl;
conn.done();
return;
}
catch( UpdateNotTheSame& ) {
- log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked (messily). " << endl;
+ LOG( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked (messily). " << endl;
conn.done();
break;
}
@@ -972,7 +972,7 @@ namespace mongo {
if( attempted > maxAttempts && ! oldLock.isEmpty() && ! oldLock["ts"].eoo() ) {
- log( logLvl - 1 ) << "could not unlock distributed lock with ts " << oldLock["ts"].OID()
+ LOG( logLvl - 1 ) << "could not unlock distributed lock with ts " << oldLock["ts"].OID()
<< ", will attempt again later" << endl;
// We couldn't unlock the lock at all, so try again later in the pinging thread...
diff --git a/src/mongo/client/distlock_test.cpp b/src/mongo/client/distlock_test.cpp
index 412a80e6e42..0dbe8366cb8 100644
--- a/src/mongo/client/distlock_test.cpp
+++ b/src/mongo/client/distlock_test.cpp
@@ -363,11 +363,11 @@ namespace mongo {
bsonArrToNumVector<long long>(cmdObj["skewHosts"], skew);
}
else {
- log( logLvl ) << "No host clocks to skew." << endl;
+ LOG( logLvl ) << "No host clocks to skew." << endl;
return;
}
- log( logLvl ) << "Skewing clocks of hosts " << cluster << endl;
+ LOG( logLvl ) << "Skewing clocks of hosts " << cluster << endl;
unsigned s = 0;
for(vector<long long>::iterator i = skew.begin(); i != skew.end(); ++i,s++) {
@@ -385,7 +385,7 @@ namespace mongo {
uassert(13678, str::stream() << "Could not communicate with server " << server.toString() << " in cluster " << cluster.toString() << " to change skew by " << *i, success );
- log( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
+ LOG( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
}
catch(...) {
conn->done();
diff --git a/src/mongo/client/model.cpp b/src/mongo/client/model.cpp
index 4b1447f03f8..4a66625da39 100644
--- a/src/mongo/client/model.cpp
+++ b/src/mongo/client/model.cpp
@@ -94,7 +94,7 @@ namespace mongo {
conn->get()->insert( getNS() , o );
_id = o["_id"].wrap().getOwned();
- log(4) << "inserted new model " << getNS() << " " << o << endl;
+ LOG(4) << "inserted new model " << getNS() << " " << o << endl;
}
else {
if ( myId.eoo() ) {
@@ -110,7 +110,7 @@ namespace mongo {
BSONObj q = qb.obj();
BSONObj o = b.obj();
- log(4) << "updated model" << getNS() << " " << q << " " << o << endl;
+ LOG(4) << "updated model" << getNS() << " " << q << " " << o << endl;
conn->get()->update( getNS() , q , o , true );
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 9158d3eeced..e6adb49ede1 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -737,7 +737,7 @@ namespace mongo {
// It's actually okay if we set the version here, since either the
// manager will be verified as compatible, or if the manager doesn't
// exist, we don't care about version consistency
- log( pc ) << "needed to set remote version on connection to value "
+ LOG( pc ) << "needed to set remote version on connection to value "
<< "compatible with " << vinfo << endl;
}
} catch ( const DBException& dbEx ) {
@@ -776,7 +776,7 @@ namespace mongo {
string prefix;
if( _totalTries > 0 ) prefix = str::stream() << "retrying (" << _totalTries << " tries)";
else prefix = "creating";
- log( pc ) << prefix << " pcursor over " << _qSpec << " and " << _cInfo << endl;
+ LOG( pc ) << prefix << " pcursor over " << _qSpec << " and " << _cInfo << endl;
set<Shard> todoStorage;
set<Shard>& todo = todoStorage;
@@ -799,7 +799,8 @@ namespace mongo {
// Close all cursors on extra shards first, as these will be invalid
for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
- log( pc ) << "closing cursor on shard " << i->first << " as the connection is no longer required by " << vinfo << endl;
+ LOG( pc ) << "closing cursor on shard " << i->first
+ << " as the connection is no longer required by " << vinfo << endl;
// Force total cleanup of these connections
if( todo.find( i->first ) == todo.end() ) i->second.cleanup();
@@ -815,7 +816,8 @@ namespace mongo {
verify( todo.size() );
- log( pc ) << "initializing over " << todo.size() << " shards required by " << vinfo << endl;
+ LOG( pc ) << "initializing over " << todo.size()
+ << " shards required by " << vinfo << endl;
// Don't retry indefinitely for whatever reason
_totalTries++;
@@ -826,7 +828,8 @@ namespace mongo {
const Shard& shard = *i;
PCMData& mdata = _cursorMap[ shard ];
- log( pc ) << "initializing on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ LOG( pc ) << "initializing on shard " << shard
+ << ", current connection state is " << mdata.toBSON() << endl;
// This may be the first time connecting to this shard, if so we can get an error here
try {
@@ -951,8 +954,9 @@ namespace mongo {
}
- log( pc ) << "initialized " << ( isCommand() ? "command " : "query " ) << ( lazyInit ? "(lazily) " : "(full) " ) << "on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
-
+ LOG( pc ) << "initialized " << ( isCommand() ? "command " : "query " )
+ << ( lazyInit ? "(lazily) " : "(full) " ) << "on shard " << shard
+ << ", current connection state is " << mdata.toBSON() << endl;
}
catch( StaleConfigException& e ){
@@ -967,7 +971,9 @@ namespace mongo {
_markStaleNS( staleNS, e, forceReload, fullReload );
int logLevel = fullReload ? 0 : 1;
- log( pc + logLevel ) << "stale config of ns " << staleNS << " during initialization, will retry with forced : " << forceReload << ", full : " << fullReload << causedBy( e ) << endl;
+ LOG( pc + logLevel ) << "stale config of ns "
+ << staleNS << " during initialization, will retry with forced : "
+ << forceReload << ", full : " << fullReload << causedBy( e ) << endl;
// This is somewhat strange
if( staleNS != ns )
@@ -1044,14 +1050,15 @@ namespace mongo {
bool retry = false;
map< string, StaleConfigException > staleNSExceptions;
- log( pc ) << "finishing over " << _cursorMap.size() << " shards" << endl;
+ LOG( pc ) << "finishing over " << _cursorMap.size() << " shards" << endl;
for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
const Shard& shard = i->first;
PCMData& mdata = i->second;
- log( pc ) << "finishing on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ LOG( pc ) << "finishing on shard " << shard
+ << ", current connection state is " << mdata.toBSON() << endl;
// Ignore empty conns for now
if( ! mdata.pcState ) continue;
@@ -1102,7 +1109,8 @@ namespace mongo {
// Finalize state
state->cursor->attach( state->conn.get() ); // Closes connection for us
- log( pc ) << "finished on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ LOG( pc ) << "finished on shard " << shard
+ << ", current connection state is " << mdata.toBSON() << endl;
}
}
catch( RecvStaleConfigException& e ){
@@ -1160,7 +1168,9 @@ namespace mongo {
_markStaleNS( staleNS, exception, forceReload, fullReload );
int logLevel = fullReload ? 0 : 1;
- log( pc + logLevel ) << "stale config of ns " << staleNS << " on finishing query, will retry with forced : " << forceReload << ", full : " << fullReload << causedBy( exception ) << endl;
+ LOG( pc + logLevel ) << "stale config of ns "
+ << staleNS << " on finishing query, will retry with forced : "
+ << forceReload << ", full : " << fullReload << causedBy( exception ) << endl;
// This is somewhat strange
if( staleNS != ns )
diff --git a/src/mongo/db/btree.cpp b/src/mongo/db/btree.cpp
index 56b0719d677..00125e0af1c 100644
--- a/src/mongo/db/btree.cpp
+++ b/src/mongo/db/btree.cpp
@@ -1706,7 +1706,7 @@ namespace mongo {
if ( found ) {
const _KeyNode& kn = k(pos);
if ( kn.isUnused() ) {
- log(4) << "btree _insert: reusing unused key" << endl;
+ LOG(4) << "btree _insert: reusing unused key" << endl;
c.b = this;
c.pos = pos;
c.op = IndexInsertionContinuation::SetUsed;
@@ -1761,7 +1761,7 @@ namespace mongo {
if ( found ) {
const _KeyNode& kn = k(pos);
if ( kn.isUnused() ) {
- log(4) << "btree _insert: reusing unused key" << endl;
+ LOG(4) << "btree _insert: reusing unused key" << endl;
massert( 10285 , "_insert: reuse key but lchild is not null", lChild.isNull());
massert( 10286 , "_insert: reuse key but rchild is not null", rChild.isNull());
kn.writing().setUsed();
diff --git a/src/mongo/db/btreebuilder.cpp b/src/mongo/db/btreebuilder.cpp
index 47611099700..46d04659a78 100644
--- a/src/mongo/db/btreebuilder.cpp
+++ b/src/mongo/db/btreebuilder.cpp
@@ -149,7 +149,7 @@ namespace mongo {
}
if( levels > 1 )
- log(2) << "btree levels: " << levels << endl;
+ LOG(2) << "btree levels: " << levels << endl;
}
/** when all addKeys are done, we then build the higher levels of the tree */
@@ -163,7 +163,7 @@ namespace mongo {
BtreeBuilder<V>::~BtreeBuilder() {
DESTRUCTOR_GUARD(
if( !committed ) {
- log(2) << "Rolling back partially built index space" << endl;
+ LOG(2) << "Rolling back partially built index space" << endl;
DiskLoc x = first;
while( !x.isNull() ) {
DiskLoc next = x.btree<V>()->tempNext();
@@ -173,7 +173,7 @@ namespace mongo {
getDur().commitIfNeeded();
}
verify( idx.head.isNull() );
- log(2) << "done rollback" << endl;
+ LOG(2) << "done rollback" << endl;
}
)
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index ed2f0f91d40..e08adaa6940 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -381,7 +381,7 @@ namespace mongo {
while ( c->more() ) {
BSONObj collection = c->next();
- log(2) << "\t cloner got " << collection << endl;
+ LOG(2) << "\t cloner got " << collection << endl;
BSONElement e = collection.getField("name");
if ( e.eoo() ) {
@@ -396,7 +396,7 @@ namespace mongo {
/* system.users and s.js is cloned -- but nothing else from system.
* system.indexes is handled specially at the end*/
if( legalClientSystemNS( from_name , true ) == 0 ) {
- log(2) << "\t\t not cloning because system collection" << endl;
+ LOG(2) << "\t\t not cloning because system collection" << endl;
continue;
}
}
@@ -424,7 +424,7 @@ namespace mongo {
dbtempreleaseif r( opts.mayYield );
}
BSONObj collection = *i;
- log(2) << " really will clone: " << collection << endl;
+ LOG(2) << " really will clone: " << collection << endl;
const char * from_name = collection["name"].valuestr();
BSONObj options = collection.getObjectField("options");
@@ -440,7 +440,7 @@ namespace mongo {
/* we defer building id index for performance - building it in batch is much faster */
userCreateNS(toname, options, err, opts.logForRepl, &wantIdIndex);
}
- log(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
+ LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
Query q;
if( opts.snapshot )
q.snapshot();
diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp
index ac526ba7b33..2e06a85d197 100644
--- a/src/mongo/db/commands/isself.cpp
+++ b/src/mongo/db/commands/isself.cpp
@@ -89,11 +89,11 @@ namespace mongo {
addrs = NULL;
if (logLevel >= 1) {
- log(1) << "getMyAddrs():";
+ LOG(1) << "getMyAddrs():";
for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
- log(1) << " [" << *it << ']';
+ LOG(1) << " [" << *it << ']';
}
- log(1) << endl;
+ LOG(1) << endl;
}
return out;
@@ -133,11 +133,11 @@ namespace mongo {
freeaddrinfo(addrs);
if (logLevel >= 1) {
- log(1) << "getallIPs(\"" << iporhost << "\"):";
+ LOG(1) << "getallIPs(\"" << iporhost << "\"):";
for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
- log(1) << " [" << *it << ']';
+ LOG(1) << " [" << *it << ']';
}
- log(1) << endl;
+ LOG(1) << endl;
}
return out;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 65b316c6543..7d6217d0f37 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -666,7 +666,7 @@ namespace mongo {
}
void State::bailFromJS() {
- log(1) << "M/R: Switching from JS mode to mixed mode" << endl;
+ LOG(1) << "M/R: Switching from JS mode to mixed mode" << endl;
// reduce and reemit into c++
switchMode(false);
@@ -824,7 +824,7 @@ namespace mongo {
{
dbtempreleasecond tl;
if ( ! tl.unlocked() )
- log( LL_WARNING ) << "map/reduce can't temp release" << endl;
+ LOG( LL_WARNING ) << "map/reduce can't temp release" << endl;
// reduce and finalize last array
finalReduce( all );
}
@@ -934,7 +934,7 @@ namespace mongo {
// reduce now to lower mem usage
Timer t;
_scope->invoke(_reduceAll, 0, 0, 0, true);
- log(1) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis() << "ms" << endl;
+ LOG(1) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis() << "ms" << endl;
return;
}
}
@@ -947,12 +947,12 @@ namespace mongo {
long oldSize = _size;
Timer t;
reduceInMemory();
- log(1) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
+ LOG(1) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
// if size is still high, or values are not reducing well, dump
if ( _onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2) ) {
dumpToInc();
- log(1) << " MR - dumping to db" << endl;
+ LOG(1) << " MR - dumping to db" << endl;
}
}
}
@@ -1022,7 +1022,7 @@ namespace mongo {
Config config( dbname , cmd );
- log(1) << "mr ns: " << config.ns << endl;
+ LOG(1) << "mr ns: " << config.ns << endl;
uassert( 16149 , "cannot run map reduce without the js engine", globalScriptEngine );
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index f7d73246f38..72a2fb5cef2 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -291,7 +291,7 @@ namespace mongo {
static void repairDatabasesAndCheckVersion() {
// LastError * le = lastError.get( true );
Client::GodScope gs;
- log(1) << "enter repairDatabases (to check pdfile version #)" << endl;
+ LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;
//verify(checkNsFilesOnLoad);
checkNsFilesOnLoad = false; // we are mainly just checking the header - don't scan the whole .ns file for every db here.
@@ -301,7 +301,7 @@ namespace mongo {
getDatabaseNames( dbNames );
for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
string dbName = *i;
- log(1) << "\t" << dbName << endl;
+ LOG(1) << "\t" << dbName << endl;
Client::Context ctx( dbName );
MongoDataFile *p = cc().database()->getFile( 0 );
DataFileHeader *h = p->getHeader();
@@ -337,7 +337,7 @@ namespace mongo {
}
}
- log(1) << "done repairDatabases" << endl;
+ LOG(1) << "done repairDatabases" << endl;
if ( shouldRepairDatabases ) {
log() << "finished checking dbs" << endl;
@@ -410,7 +410,7 @@ namespace mongo {
else if( cmdLine.syncdelay == 1 )
log() << "--syncdelay 1" << endl;
else if( cmdLine.syncdelay != 60 )
- log(1) << "--syncdelay " << cmdLine.syncdelay << endl;
+ LOG(1) << "--syncdelay " << cmdLine.syncdelay << endl;
int time_flushing = 0;
while ( ! inShutdown() ) {
_diaglog.flush();
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 99e2144ac5e..0e8020f0eb2 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -970,7 +970,7 @@ namespace mongo {
for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
BSONObj o = *i;
- log(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
+ LOG(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
theDataFileMgr.insertWithObjMod( Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" ).c_str() , o , true );
}
@@ -1580,8 +1580,8 @@ namespace mongo {
DiskLoc extent = nsd->firstExtent;
for( ; excessSize > extent.ext()->length && extent != nsd->lastExtent; extent = extent.ext()->xnext ) {
excessSize -= extent.ext()->length;
- log( 2 ) << "cloneCollectionAsCapped skipping extent of size " << extent.ext()->length << endl;
- log( 6 ) << "excessSize: " << excessSize << endl;
+ LOG( 2 ) << "cloneCollectionAsCapped skipping extent of size " << extent.ext()->length << endl;
+ LOG( 6 ) << "excessSize: " << excessSize << endl;
}
DiskLoc startLoc = extent.ext()->firstRecord;
@@ -1895,7 +1895,7 @@ namespace mongo {
}
}
catch ( SendStaleConfigException& e ){
- log(1) << "command failed because of stale config, can retry" << causedBy( e ) << endl;
+ LOG(1) << "command failed because of stale config, can retry" << causedBy( e ) << endl;
throw;
}
catch ( DBException& e ) {
@@ -1993,7 +1993,7 @@ namespace mongo {
}
if ( c->adminOnly() )
- log( 2 ) << "command: " << cmdObj << endl;
+ LOG( 2 ) << "command: " << cmdObj << endl;
if (c->maintenanceMode() && theReplSet && theReplSet->isSecondary()) {
theReplSet->setMaintenanceMode(true);
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 05ec2db680f..ad14417027a 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -398,7 +398,7 @@ namespace mongo {
_out = new ofstream();
_out->open( _file.string().c_str() , ios_base::out | ios_base::binary );
if ( ! _out->good() ) {
- log( LL_WARNING ) << "couldn't create file: " << _file.string() << " for remove saving" << endl;
+ LOG( LL_WARNING ) << "couldn't create file: " << _file.string() << " for remove saving" << endl;
delete _out;
_out = 0;
return;
diff --git a/src/mongo/db/dur.cpp b/src/mongo/db/dur.cpp
index 5e65cf97866..e0cd17c8f1c 100644
--- a/src/mongo/db/dur.cpp
+++ b/src/mongo/db/dur.cpp
@@ -287,7 +287,7 @@ namespace mongo {
return false;
}
- log(1) << "commitIfNeeded upgrading from shared write to exclusive write state"
+ LOG(1) << "commitIfNeeded upgrading from shared write to exclusive write state"
<< endl;
Lock::DBWrite::UpgradeToExclusive ex;
if (ex.gotUpgrade()) {
diff --git a/src/mongo/db/dur_journal.cpp b/src/mongo/db/dur_journal.cpp
index 290b7fe466b..10043f74bb8 100644
--- a/src/mongo/db/dur_journal.cpp
+++ b/src/mongo/db/dur_journal.cpp
@@ -222,7 +222,7 @@ namespace mongo {
flushMyDirectory(getJournalDir() / "file"); // flushes parent of argument (in this case journal dir)
- log(1) << "removeJournalFiles end" << endl;
+ LOG(1) << "removeJournalFiles end" << endl;
}
/** at clean shutdown */
diff --git a/src/mongo/db/durop.cpp b/src/mongo/db/durop.cpp
index b0039c93b74..400884bfb50 100644
--- a/src/mongo/db/durop.cpp
+++ b/src/mongo/db/durop.cpp
@@ -124,7 +124,7 @@ namespace mongo {
boost::filesystem::remove(full);
}
catch(std::exception& e) {
- log(1) << "recover info FileCreateOp::replay unlink " << e.what() << endl;
+ LOG(1) << "recover info FileCreateOp::replay unlink " << e.what() << endl;
}
}
diff --git a/src/mongo/db/extsort.cpp b/src/mongo/db/extsort.cpp
index 489dbaa959c..8152f32b7ba 100644
--- a/src/mongo/db/extsort.cpp
+++ b/src/mongo/db/extsort.cpp
@@ -82,7 +82,7 @@ namespace mongo {
rootpath << "_tmp/esort." << time(0) << "." << thisUniqueNumber << "/";
_root = rootpath.str();
- log(1) << "external sort root: " << _root.string() << endl;
+ LOG(1) << "external sort root: " << _root.string() << endl;
create_directories( _root );
_compares = 0;
@@ -113,7 +113,7 @@ namespace mongo {
if ( _cur && _files.size() == 0 ) {
_sortInMem();
- log(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
+ LOG(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
return;
}
@@ -147,7 +147,7 @@ namespace mongo {
if ( _cur->hasSpace() == false || _curSizeSoFar > _maxFilesize ) {
finishMap();
- log(1) << "finishing map" << endl;
+ LOG(1) << "finishing map" << endl;
}
}
@@ -187,7 +187,7 @@ namespace mongo {
_files.push_back( file );
out.close();
- log(2) << "Added file: " << file << " with " << num << "objects for external sort" << endl;
+ LOG(2) << "Added file: " << file << " with " << num << "objects for external sort" << endl;
}
// ---------------------------------
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index 0a5f6121f7a..72033dc6a82 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -178,7 +178,7 @@ namespace mongo {
BSONObjBuilder& result, unsigned limit) {
Timer t;
- log(1) << "SEARCH near:" << n << " maxDistance:" << maxDistance
+ LOG(1) << "SEARCH near:" << n << " maxDistance:" << maxDistance
<< " search: " << search << endl;
int x, y;
{
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index 274c3aa37d9..768294861c5 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -206,7 +206,7 @@ namespace mongo {
dropNS(ns.c_str());
}
catch(DBException& ) {
- log(2) << "IndexDetails::kill(): couldn't drop ns " << ns << endl;
+ LOG(2) << "IndexDetails::kill(): couldn't drop ns " << ns << endl;
}
head.setInvalid();
info.setInvalid();
@@ -317,7 +317,7 @@ namespace mongo {
}
if ( sourceNS.empty() || key.isEmpty() ) {
- log(2) << "bad add index attempt name:" << (name?name:"") << "\n ns:" <<
+ LOG(2) << "bad add index attempt name:" << (name?name:"") << "\n ns:" <<
sourceNS << "\n idxobj:" << io.toString() << endl;
string s = "bad add index attempt " + sourceNS + " key:" + key.toString();
uasserted(12504, s);
@@ -341,7 +341,7 @@ namespace mongo {
return false;
}
if( sourceCollection->findIndexByKeyPattern(key) >= 0 ) {
- log(2) << "index already exists with diff name " << name << ' ' << key.toString() << endl;
+ LOG(2) << "index already exists with diff name " << name << ' ' << key.toString() << endl;
return false;
}
diff --git a/src/mongo/db/index_update.cpp b/src/mongo/db/index_update.cpp
index 23a28a3fc99..74cdd6826eb 100644
--- a/src/mongo/db/index_update.cpp
+++ b/src/mongo/db/index_update.cpp
@@ -171,7 +171,7 @@ namespace mongo {
_unindexRecord(d->idx(j), obj, loc, false);
}
catch(...) {
- log(3) << "unindex fails on rollback after unique key constraint prevented insert\n";
+ LOG(3) << "unindex fails on rollback after unique key constraint prevented insert\n";
}
}
throw;
@@ -263,7 +263,7 @@ namespace mongo {
}
pm.finished();
op->setMessage( "index: (3/3) btree-middle" );
- log(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
+ LOG(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
btBuilder.commit();
if ( btBuilder.getn() != phase1->nkeys && ! dropDups ) {
warning() << "not all entries were added to the index, probably some keys were too large" << endl;
@@ -321,7 +321,7 @@ namespace mongo {
phase1->sorter->sort();
if ( logLevel > 1 ) printMemInfo( "after final sort" );
- log(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
+ LOG(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
set<DiskLoc> dupsToDrop;
@@ -531,7 +531,7 @@ namespace mongo {
_unindexRecord(d->idx(j), obj, loc, false);
}
catch(...) {
- log(3) << "unindex fails on rollback after unique failure\n";
+ LOG(3) << "unindex fails on rollback after unique failure\n";
}
}
throw;
@@ -605,7 +605,7 @@ namespace mongo {
// delete a specific index or all?
if ( *name == '*' && name[1] == 0 ) {
- log(4) << " d->nIndexes was " << d->nIndexes << '\n';
+ LOG(4) << " d->nIndexes was " << d->nIndexes << '\n';
anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
IndexDetails *idIndex = 0;
if( d->nIndexes ) {
@@ -634,7 +634,7 @@ namespace mongo {
// delete just one index
int x = d->findIndexByName(name);
if ( x >= 0 ) {
- log(4) << " d->nIndexes was " << d->nIndexes << endl;
+ LOG(4) << " d->nIndexes was " << d->nIndexes << endl;
anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
/* note it is important we remove the IndexDetails with this
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 26593ba65a0..fc5706bcca7 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -471,10 +471,10 @@ namespace mongo {
if ( currentOp.shouldDBProfile( debug.executionTime ) ) {
// performance profiling is on
if ( Lock::isReadLocked() ) {
- mongo::log(1) << "note: not profiling because recursive read lock" << endl;
+ LOG(1) << "note: not profiling because recursive read lock" << endl;
}
else if ( lockedForWriting() ) {
- mongo::log(1) << "note: not profiling because doing fsync+lock" << endl;
+ LOG(1) << "note: not profiling because doing fsync+lock" << endl;
}
else {
try {
@@ -509,14 +509,14 @@ namespace mongo {
uassert( 13004 , str::stream() << "sent negative cursors to kill: " << n , n >= 1 );
if ( n > 2000 ) {
- log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
+ LOG( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
verify( n < 30000 );
}
int found = ClientCursor::erase(n, (long long *) x);
if ( logLevel > 0 || found != n ) {
- log( found == n ) << "killcursors: found " << found << " of " << n << endl;
+ LOG( found == n ) << "killcursors: found " << found << " of " << n << endl;
}
}
@@ -1081,7 +1081,7 @@ namespace mongo {
#endif
// block the dur thread from doing any work for the rest of the run
- log(2) << "shutdown: groupCommitMutex" << endl;
+ LOG(2) << "shutdown: groupCommitMutex" << endl;
SimpleMutex::scoped_lock lk(dur::commitJob.groupCommitMutex);
#ifdef _WIN32
diff --git a/src/mongo/db/oplog.cpp b/src/mongo/db/oplog.cpp
index a8ba73f2b4d..ec69d3e12a9 100644
--- a/src/mongo/db/oplog.cpp
+++ b/src/mongo/db/oplog.cpp
@@ -224,7 +224,7 @@ namespace mongo {
append_O_Obj(r->data(), partial, obj);
if ( logLevel >= 6 ) {
- log( 6 ) << "logOp:" << BSONObj::make(r) << endl;
+ LOG( 6 ) << "logOp:" << BSONObj::make(r) << endl;
}
}
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 6e1b1825ff5..0f49c44c009 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -169,7 +169,7 @@ namespace mongo {
void ensureIdIndexForNewNs(const char *ns) {
if ( ( strstr( ns, ".system." ) == 0 || legalClientSystemNS( ns , false ) ) &&
strstr( ns, FREELIST_NS ) == 0 ) {
- log( 1 ) << "adding _id index for collection " << ns << endl;
+ LOG( 1 ) << "adding _id index for collection " << ns << endl;
ensureHaveIdIndex( ns );
}
}
@@ -239,7 +239,7 @@ namespace mongo {
}
bool _userCreateNS(const char *ns, const BSONObj& options, string& err, bool *deferIdIndex) {
- log(1) << "create collection " << ns << ' ' << options << endl;
+ LOG(1) << "create collection " << ns << ' ' << options << endl;
if ( nsdetails(ns) ) {
err = "collection already exists";
@@ -608,7 +608,7 @@ namespace mongo {
}
}
- if( n > 128 ) log( n < 512 ) << "warning: newExtent " << n << " scanned\n";
+ if( n > 128 ) LOG( n < 512 ) << "warning: newExtent " << n << " scanned\n";
if( best ) {
Extent *e = best;
@@ -979,7 +979,7 @@ namespace mongo {
}
void dropCollection( const string &name, string &errmsg, BSONObjBuilder &result ) {
- log(1) << "dropCollection: " << name << endl;
+ LOG(1) << "dropCollection: " << name << endl;
NamespaceDetails *d = nsdetails(name.c_str());
if( d == 0 )
return;
@@ -998,7 +998,7 @@ namespace mongo {
}
verify( d->nIndexes == 0 );
}
- log(1) << "\t dropIndexes done" << endl;
+ LOG(1) << "\t dropIndexes done" << endl;
result.append("ns", name.c_str());
ClientCursor::invalidate(name.c_str());
Top::global.collectionDropped( name );
@@ -1328,7 +1328,7 @@ namespace mongo {
NOINLINE_DECL DiskLoc outOfSpace(const char *ns, NamespaceDetails *d, int lenWHdr, bool god, DiskLoc extentLoc) {
DiskLoc loc;
if ( ! d->isCapped() ) { // size capped doesn't grow
- log(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor() << " lenWHdr: " << lenWHdr << endl;
+ LOG(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor() << " lenWHdr: " << lenWHdr << endl;
cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false, !god);
loc = d->alloc(ns, lenWHdr, extentLoc);
if ( loc.isNull() ) {
@@ -1690,7 +1690,7 @@ namespace mongo {
}
void dropDatabase(const std::string& db) {
- log(1) << "dropDatabase " << db << endl;
+ LOG(1) << "dropDatabase " << db << endl;
Lock::assertWriteLocked(db);
Database *d = cc().database();
verify( d );
@@ -1906,7 +1906,7 @@ namespace mongo {
bool ok = false;
MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply( q ) );
if ( ok )
- log(2) << fo.op() << " file " << q.string() << endl;
+ LOG(2) << fo.op() << " file " << q.string() << endl;
int i = 0;
int extra = 10; // should not be necessary, this is defensive in case there are missing files
while ( 1 ) {
@@ -1917,7 +1917,7 @@ namespace mongo {
MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply(q) );
if ( ok ) {
if ( extra != 10 ) {
- log(1) << fo.op() << " file " << q.string() << endl;
+ LOG(1) << fo.op() << " file " << q.string() << endl;
log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
}
}
@@ -1950,7 +1950,7 @@ namespace mongo {
int nNotClosed = 0;
for( set< string >::iterator i = dbs.begin(); i != dbs.end(); ++i ) {
string name = *i;
- log(2) << "DatabaseHolder::closeAll path:" << path << " name:" << name << endl;
+ LOG(2) << "DatabaseHolder::closeAll path:" << path << " name:" << name << endl;
Client::Context ctx( name , path );
if( !force && BackgroundOperation::inProgForDb(name.c_str()) ) {
log() << "WARNING: can't close database " << name << " because a bg job is in progress - try killOp command" << endl;
diff --git a/src/mongo/db/projection.cpp b/src/mongo/db/projection.cpp
index ad875605916..c4401a3003c 100644
--- a/src/mongo/db/projection.cpp
+++ b/src/mongo/db/projection.cpp
@@ -176,7 +176,7 @@ namespace mongo {
MatchDetails arrayDetails;
arrayDetails.requestElemMatchKey();
if ( matcher->second->matches( in, &arrayDetails ) ) {
- log(4) << "Matched array on field: " << matcher->first << endl
+ LOG(4) << "Matched array on field: " << matcher->first << endl
<< " from array: " << in.getField( matcher->first ) << endl
<< " in object: " << in << endl
<< " at position: " << arrayDetails.elemMatchKey() << endl;
@@ -282,7 +282,7 @@ namespace mongo {
if ( details && arrayOpType == ARRAY_OP_POSITIONAL ) {
// $ positional operator specified
- log(4) << "projection: checking if element " << e << " matched spec: "
+ LOG(4) << "projection: checking if element " << e << " matched spec: "
<< getSpec() << " match details: " << *details << endl;
uassert( 16352, mongoutils::str::stream() << "positional operator ("
<< e.fieldName()
@@ -333,7 +333,7 @@ namespace mongo {
mongoutils::str::before( projectionElement.fieldName(), "." ) ) {
// found query spec that matches positional array projection spec
- log(4) << "Query specifies field named for positional operator: "
+ LOG(4) << "Query specifies field named for positional operator: "
<< queryElement.fieldName() << endl;
return;
}
diff --git a/src/mongo/db/queryoptimizer.cpp b/src/mongo/db/queryoptimizer.cpp
index 78f3fd595f7..40855fbfcd2 100644
--- a/src/mongo/db/queryoptimizer.cpp
+++ b/src/mongo/db/queryoptimizer.cpp
@@ -1342,7 +1342,7 @@ doneCheckOrder:
massert( 10369 , "no plans", _plans.plans().size() > 0 );
if ( _plans.plans().size() > 1 )
- log(1) << " running multiple plans" << endl;
+ LOG(1) << " running multiple plans" << endl;
for( QueryPlanSet::PlanVector::const_iterator i = _plans.plans().begin();
i != _plans.plans().end(); ++i ) {
shared_ptr<QueryPlanRunner> runner( _prototypeRunner.createChild() );
diff --git a/src/mongo/db/repl.cpp b/src/mongo/db/repl.cpp
index cd5b56e44ee..3b372b951d3 100644
--- a/src/mongo/db/repl.cpp
+++ b/src/mongo/db/repl.cpp
@@ -336,7 +336,7 @@ namespace mongo {
BSONObj pattern = b.done();
BSONObj o = jsobj();
- log( 1 ) << "Saving repl source: " << o << endl;
+ LOG( 1 ) << "Saving repl source: " << o << endl;
{
OpDebug debug;
@@ -649,7 +649,7 @@ namespace mongo {
*/
void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op, bool alreadyLocked) {
if( logLevel >= 6 ) // op.tostring is expensive so doing this check explicitly
- log(6) << "processing op: " << op << endl;
+ LOG(6) << "processing op: " << op << endl;
if( op.getStringField("op")[0] == 'n' )
return;
@@ -731,7 +731,7 @@ namespace mongo {
bool incompleteClone = incompleteCloneDbs.count( clientName ) != 0;
if( logLevel >= 6 )
- log(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
+ LOG(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
// always apply admin command command
// this is a bit hacky -- the semantics of replication/commands aren't well specified
@@ -795,7 +795,7 @@ namespace mongo {
int ReplSource::sync_pullOpLog(int& nApplied) {
int okResultCode = 1;
string ns = string("local.oplog.$") + sourceName();
- log(2) << "repl: sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
+ LOG(2) << "repl: sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
bool tailing = true;
oplogReader.tailCheck();
@@ -818,7 +818,7 @@ namespace mongo {
if ( !e.embeddedObject().getBoolField( "empty" ) ) {
if ( name != "local" ) {
if ( only.empty() || only == name ) {
- log( 2 ) << "adding to 'addDbNextPass': " << name << endl;
+ LOG( 2 ) << "adding to 'addDbNextPass': " << name << endl;
addDbNextPass.insert( name );
}
}
@@ -846,7 +846,7 @@ namespace mongo {
tailing = false;
}
else {
- log(2) << "repl: tailing=true\n";
+ LOG(2) << "repl: tailing=true\n";
}
if( !oplogReader.haveCursor() ) {
@@ -869,7 +869,7 @@ namespace mongo {
if ( !oplogReader.more() ) {
if ( tailing ) {
- log(2) << "repl: tailing & no new activity\n";
+ LOG(2) << "repl: tailing & no new activity\n";
if( oplogReader.awaitCapable() )
okResultCode = 0; // don't sleep
@@ -908,9 +908,9 @@ namespace mongo {
}
nextOpTime = OpTime( ts.date() );
- log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
+ LOG(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
if ( initial ) {
- log(1) << "repl: initial run\n";
+ LOG(1) << "repl: initial run\n";
}
if( tailing ) {
if( !( syncedTo < nextOpTime ) ) {
@@ -1127,7 +1127,7 @@ namespace mongo {
BSONObj res;
bool ok = conn->runCommand( "admin" , cmd.obj() , res );
// ignoring for now on purpose for older versions
- log(ok) << "replHandshake res not: " << ok << " res: " << res << endl;
+ LOG(ok) << "replHandshake res not: " << ok << " res: " << res << endl;
return true;
}
@@ -1233,7 +1233,7 @@ namespace mongo {
}
if ( !oplogReader.connect(hostName) ) {
- log(4) << "repl: can't connect to sync source" << endl;
+ LOG(4) << "repl: can't connect to sync source" << endl;
return -1;
}
@@ -1413,7 +1413,7 @@ namespace mongo {
}
}
else {
- log(5) << "couldn't logKeepalive" << endl;
+ LOG(5) << "couldn't logKeepalive" << endl;
toSleep = 1;
}
}
@@ -1484,12 +1484,12 @@ namespace mongo {
if ( replSettings.slave ) {
verify( replSettings.slave == SimpleSlave );
- log(1) << "slave=true" << endl;
+ LOG(1) << "slave=true" << endl;
boost::thread repl_thread(replSlaveThread);
}
if ( replSettings.master ) {
- log(1) << "master=true" << endl;
+ LOG(1) << "master=true" << endl;
replSettings.master = true;
createOplog();
boost::thread t(replMasterThread);
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 9214c19c121..056096eedfe 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -415,7 +415,7 @@ namespace replset {
string current = target->fullName();
if (!r.connect(current)) {
- log(2) << "replSet can't connect to " << current << " to read operations" << rsLog;
+ LOG(2) << "replSet can't connect to " << current << " to read operations" << rsLog;
r.resetConnection();
theReplSet->veto(current);
continue;
diff --git a/src/mongo/db/repl/consensus.cpp b/src/mongo/db/repl/consensus.cpp
index e7eaebeae38..f9336aca990 100644
--- a/src/mongo/db/repl/consensus.cpp
+++ b/src/mongo/db/repl/consensus.cpp
@@ -414,7 +414,7 @@ namespace mongo {
}
else {
/* succeeded. */
- log(1) << "replSet election succeeded, assuming primary role" << rsLog;
+ LOG(1) << "replSet election succeeded, assuming primary role" << rsLog;
success = true;
rs.assumePrimary();
}
diff --git a/src/mongo/db/repl/manager.cpp b/src/mongo/db/repl/manager.cpp
index a35d69da544..72452322e08 100644
--- a/src/mongo/db/repl/manager.cpp
+++ b/src/mongo/db/repl/manager.cpp
@@ -252,7 +252,7 @@ namespace mongo {
int ll = 0;
if( ++n > 5 ) ll++;
if( last + 60 > time(0 ) ) ll++;
- log(ll) << "replSet can't see a majority, will not try to elect self" << rsLog;
+ LOG(ll) << "replSet can't see a majority, will not try to elect self" << rsLog;
last = time(0);
return;
}
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
index 2e577c55754..c777b833ebc 100644
--- a/src/mongo/db/repl/rs.cpp
+++ b/src/mongo/db/repl/rs.cpp
@@ -79,7 +79,7 @@ namespace mongo {
}
if( !s.empty() ) {
lastLogged = _hbmsgTime;
- log(logLevel) << "replSet " << s << rsLog;
+ LOG(logLevel) << "replSet " << s << rsLog;
}
}
@@ -354,7 +354,7 @@ namespace mongo {
seedSet.insert(m);
//uassert(13101, "can't use localhost in replset host list", !m.isLocalHost());
if( m.isSelf() ) {
- log(1) << "replSet ignoring seed " << m.toString() << " (=self)" << rsLog;
+ LOG(1) << "replSet ignoring seed " << m.toString() << " (=self)" << rsLog;
}
else
seeds.push_back(m);
@@ -649,7 +649,7 @@ namespace mongo {
int n = 0;
for( vector<ReplSetConfig*>::iterator i = cfgs.begin(); i != cfgs.end(); i++ ) {
ReplSetConfig* cfg = *i;
- DEV log(1) << n+1 << " config shows version " << cfg->version << rsLog;
+ DEV LOG(1) << n+1 << " config shows version " << cfg->version << rsLog;
if( ++n == 1 ) myVersion = cfg->version;
if( cfg->ok() && cfg->version > v ) {
highest = cfg;
@@ -700,7 +700,7 @@ namespace mongo {
configs.vector().push_back( ReplSetConfig::make(HostAndPort(*i)) );
}
catch( DBException& ) {
- log(1) << "replSet exception trying to load config from discovered seed " << *i << rsLog;
+ LOG(1) << "replSet exception trying to load config from discovered seed " << *i << rsLog;
replSettings.discoveredSeeds.erase(*i);
}
}
diff --git a/src/mongo/db/repl/rs_config.cpp b/src/mongo/db/repl/rs_config.cpp
index 3996c65ba3e..fe6ed00345c 100644
--- a/src/mongo/db/repl/rs_config.cpp
+++ b/src/mongo/db/repl/rs_config.cpp
@@ -696,14 +696,14 @@ namespace mongo {
}
catch( DBException& e) {
version = v;
- log(level) << "replSet load config couldn't get from " << h.toString() << ' ' << e.what() << rsLog;
+ LOG(level) << "replSet load config couldn't get from " << h.toString() << ' ' << e.what() << rsLog;
return;
}
from(cfg);
checkRsConfig();
_ok = true;
- log(level) << "replSet load config ok from " << (h.isSelf() ? "self" : h.toString()) << rsLog;
+ LOG(level) << "replSet load config ok from " << (h.isSelf() ? "self" : h.toString()) << rsLog;
_constructed = true;
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 77de1fba4ac..6c653f32e79 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -430,7 +430,7 @@ namespace mongo {
try {
bob res;
string errmsg;
- log(1) << "replSet rollback drop: " << *i << rsLog;
+ LOG(1) << "replSet rollback drop: " << *i << rsLog;
dropCollection(*i, errmsg, res);
}
catch(...) {
diff --git a/src/mongo/db/repl_block.cpp b/src/mongo/db/repl_block.cpp
index ccb7a2423a5..c43244f5910 100644
--- a/src/mongo/db/repl_block.cpp
+++ b/src/mongo/db/repl_block.cpp
@@ -256,7 +256,7 @@ namespace mongo {
if (theReplSet && !theReplSet->isPrimary()) {
// we don't know the slave's port, so we make the replica set keep
// a map of rids to slaves
- log(2) << "percolating " << lastOp.toString() << " from " << rid << endl;
+ LOG(2) << "percolating " << lastOp.toString() << " from " << rid << endl;
theReplSet->ghost->send( boost::bind(&GhostSync::percolate, theReplSet->ghost, rid, lastOp) );
}
}
diff --git a/src/mongo/db/security_commands.cpp b/src/mongo/db/security_commands.cpp
index 6dbbe3dabec..00a18d06c8e 100644
--- a/src/mongo/db/security_commands.cpp
+++ b/src/mongo/db/security_commands.cpp
@@ -91,12 +91,12 @@ namespace mongo {
scoped_ptr<nonce64> ln(lastNonce.release());
if ( !ln ) {
reject = true;
- log(1) << "auth: no lastNonce" << endl;
+ LOG(1) << "auth: no lastNonce" << endl;
}
else {
digestBuilder << hex << *ln;
reject = digestBuilder.str() != received_nonce;
- if ( reject ) log(1) << "auth: different lastNonce" << endl;
+ if ( reject ) LOG(1) << "auth: different lastNonce" << endl;
}
if ( reject ) {
diff --git a/src/mongo/db/security_common.cpp b/src/mongo/db/security_common.cpp
index 3740d7562e0..bf0628fd0a7 100644
--- a/src/mongo/db/security_common.cpp
+++ b/src/mongo/db/security_common.cpp
@@ -107,7 +107,7 @@ namespace mongo {
return false;
}
- log(1) << "security key: " << str << endl;
+ LOG(1) << "security key: " << str << endl;
// createPWDigest should really not be a member func
DBClientConnection conn;
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index d8965e46383..9e135cfcc67 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -618,15 +618,15 @@ namespace ThreadedTests {
sleepmillis(100*x);
int Z = 1;
- log(Z) << x << ' ' << what[x] << " request" << endl;
+ LOG(Z) << x << ' ' << what[x] << " request" << endl;
char ch = what[x];
switch( ch ) {
case 'w':
{
m.lock();
- log(Z) << x << " w got" << endl;
+ LOG(Z) << x << " w got" << endl;
sleepmillis(100);
- log(Z) << x << " w unlock" << endl;
+ LOG(Z) << x << " w unlock" << endl;
m.unlock();
}
break;
@@ -635,7 +635,7 @@ namespace ThreadedTests {
{
Timer t;
RWLock::Upgradable u(m);
- log(Z) << x << ' ' << ch << " got" << endl;
+ LOG(Z) << x << ' ' << ch << " got" << endl;
if( ch == 'U' ) {
#ifdef MONGO_USE_SRW_ON_WINDOWS
// SRW locks are neither fair nor FIFO, as per docs
@@ -654,7 +654,7 @@ namespace ThreadedTests {
}
}
sleepsecs(1);
- log(Z) << x << ' ' << ch << " unlock" << endl;
+ LOG(Z) << x << ' ' << ch << " unlock" << endl;
}
break;
case 'r':
@@ -662,7 +662,7 @@ namespace ThreadedTests {
{
Timer t;
m.lock_shared();
- log(Z) << x << ' ' << ch << " got " << endl;
+ LOG(Z) << x << ' ' << ch << " got " << endl;
if( what[x] == 'R' ) {
if( t.millis() > 15 ) {
// commented out for less chatter, we aren't using upgradeable anyway right now:
@@ -670,7 +670,7 @@ namespace ThreadedTests {
}
}
sleepmillis(200);
- log(Z) << x << ' ' << ch << " unlock" << endl;
+ LOG(Z) << x << ' ' << ch << " unlock" << endl;
m.unlock_shared();
}
break;
@@ -822,24 +822,24 @@ namespace ThreadedTests {
int Z = 0;
Client::initThread("utest");
if( x == 1 ) {
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1" << endl;
rwlock_shared lk(m);
sleepmillis(300);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
}
if( x == 2 ) {
sleepmillis(100);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 2" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2" << endl;
rwlock lk(m, true);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
}
if( x == 3 ) {
sleepmillis(200);
Timer t;
- log(Z) << mongo::curTimeMillis64() % 10000 << " 3" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3" << endl;
rwlock_shared lk(m);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
- log(Z) << t.millis() << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
+ LOG(Z) << t.millis() << endl;
ASSERT( t.millis() > 50 );
}
cc().shutdown();
@@ -859,18 +859,18 @@ namespace ThreadedTests {
int Z = 0;
Client::initThread("qtest");
if( x == 1 ) {
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1 lock_r()..." << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1 lock_r()..." << endl;
m.lock_r();
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1 got" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1 got" << endl;
sleepmillis(300);
m.unlock_r();
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1 unlock_r()" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1 unlock_r()" << endl;
}
if( x == 2 || x == 4 ) {
sleepmillis(x*50);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 2 lock_W()..." << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2 lock_W()..." << endl;
m.lock_W();
- log(Z) << mongo::curTimeMillis64() % 10000 << " 2 got" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2 got" << endl;
gotW = true;
m.unlock_W();
}
@@ -878,12 +878,12 @@ namespace ThreadedTests {
sleepmillis(200);
Timer t;
- log(Z) << mongo::curTimeMillis64() % 10000 << " 3 lock_r()..." << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3 lock_r()..." << endl;
m.lock_r();
verify( gotW );
- log(Z) << mongo::curTimeMillis64() % 10000 << " 3 got" << gotW << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3 got" << gotW << endl;
m.unlock_r();
- log(Z) << t.millis() << endl;
+ LOG(Z) << t.millis() << endl;
ASSERT( t.millis() > 50 );
}
cc().shutdown();
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index c53fc411037..6e4405a6049 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -331,7 +331,7 @@ namespace mongo {
);
fromconn->done();
- log( worked ) << "moveChunk result: " << res << endl;
+ LOG( worked ) << "moveChunk result: " << res << endl;
// if succeeded, needs to reload to pick up the new location
// if failed, mongos may be stale
@@ -1334,7 +1334,7 @@ namespace mongo {
}
catch (...) {
- log( LL_ERROR ) << "\t invalid ChunkRangeMap! printing ranges:" << endl;
+ LOG( LL_ERROR ) << "\t invalid ChunkRangeMap! printing ranges:" << endl;
for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it)
cout << it->first << ": " << *it->second << endl;
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index b3332fa647a..e5ccb3203dd 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -806,7 +806,7 @@ namespace mongo {
}
if ( up == 1 ) {
- log( LL_WARNING ) << "only 1 config server reachable, continuing" << endl;
+ LOG( LL_WARNING ) << "only 1 config server reachable, continuing" << endl;
return true;
}
@@ -826,7 +826,7 @@ namespace mongo {
stringstream ss;
ss << "config servers " << _config[firstGood] << " and " << _config[i] << " differ";
- log( LL_WARNING ) << ss.str() << endl;
+ LOG( LL_WARNING ) << ss.str() << endl;
if ( tries <= 1 ) {
ss << "\n" << c1 << "\t" << c2 << "\n" << d1 << "\t" << d2;
errmsg = ss.str();
@@ -846,7 +846,7 @@ namespace mongo {
if ( checkConsistency ) {
string errmsg;
if ( ! checkConfigServersConsistent( errmsg ) ) {
- log( LL_ERROR ) << "config servers not in sync! " << errmsg << warnings;
+ LOG( LL_ERROR ) << "config servers not in sync! " << errmsg << warnings;
return false;
}
}
@@ -1027,7 +1027,7 @@ namespace mongo {
try {
Shard s = Shard::lookupRSName(monitor->getName());
if (s == Shard::EMPTY) {
- log(1) << "replicaSetChange: shard not found for set: " << monitor->getServerAddress() << endl;
+ LOG(1) << "replicaSetChange: shard not found for set: " << monitor->getServerAddress() << endl;
return;
}
scoped_ptr<ScopedDbConnection> conn( ScopedDbConnection::getScopedDbConnection(
diff --git a/src/mongo/s/cursors.cpp b/src/mongo/s/cursors.cpp
index 9d392b0a6cf..34e787cfb80 100644
--- a/src/mongo/s/cursors.cpp
+++ b/src/mongo/s/cursors.cpp
@@ -232,7 +232,7 @@ namespace mongo {
int n = *x++;
if ( n > 2000 ) {
- log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
+ LOG( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
}
@@ -245,7 +245,7 @@ namespace mongo {
LOG(_myLogLevel) << "CursorCache::gotKillCursors id: " << id << endl;
if ( ! id ) {
- log( LL_WARNING ) << " got cursor id of 0 to kill" << endl;
+ LOG( LL_WARNING ) << " got cursor id of 0 to kill" << endl;
continue;
}
@@ -261,7 +261,7 @@ namespace mongo {
MapNormal::iterator j = _refs.find( id );
if ( j == _refs.end() ) {
- log( LL_WARNING ) << "can't find cursor: " << id << endl;
+ LOG( LL_WARNING ) << "can't find cursor: " << id << endl;
continue;
}
server = j->second;
@@ -295,7 +295,7 @@ namespace mongo {
if ( idleFor < TIMEOUT ) {
continue;
}
- log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make log(1)
+ log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make LOG(1)
_cursors.erase( i );
i = _cursors.begin(); // possible 2nd entry will get skipped, will get on next pass
if ( i == _cursors.end() )
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 9842fd16ff9..9a9e7868164 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1091,7 +1091,7 @@ namespace mongo {
conn->done();
- log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;
+ LOG(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;
if ( ! ok || res["state"].String() == "fail" ) {
warning() << "moveChunk error transferring data caused migration abort: " << res << migrateLog;
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index da7e063d6f9..c641fda4f2f 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -584,7 +584,7 @@ namespace mongo {
result.append( "requestedMin" , min );
result.append( "requestedMax" , max );
- log( LL_WARNING ) << "aborted split because " << errmsg << ": " << min << "->" << max
+ LOG( LL_WARNING ) << "aborted split because " << errmsg << ": " << min << "->" << max
<< " is now " << currMin << "->" << currMax << endl;
return false;
}
@@ -594,7 +594,7 @@ namespace mongo {
result.append( "from" , myShard.getName() );
result.append( "official" , shard );
- log( LL_WARNING ) << "aborted split because " << errmsg << ": chunk is at " << shard
+ LOG( LL_WARNING ) << "aborted split because " << errmsg << ": chunk is at " << shard
<< " and not at " << myShard.getName() << endl;
return false;
}
@@ -604,7 +604,7 @@ namespace mongo {
maxVersion.addToBSON( result, "officialVersion" );
shardingState.getVersion( ns ).addToBSON( result, "myVersion" );
- log( LL_WARNING ) << "aborted split because " << errmsg << ": official " << maxVersion
+ LOG( LL_WARNING ) << "aborted split because " << errmsg << ": official " << maxVersion
<< " mine: " << shardingState.getVersion(ns) << endl;
return false;
}
diff --git a/src/mongo/s/s_only.cpp b/src/mongo/s/s_only.cpp
index 8708d59aafc..33bbad838c0 100644
--- a/src/mongo/s/s_only.cpp
+++ b/src/mongo/s/s_only.cpp
@@ -89,7 +89,7 @@ namespace mongo {
log() << "command denied: " << cmdObj.toString() << endl;
return false;
}
- log( 2 ) << "command: " << cmdObj << endl;
+ LOG( 2 ) << "command: " << cmdObj << endl;
}
if (!client.getAuthenticationInfo()->isAuthorized(dbname)) {
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 0a351bd34ee..37abaf05f53 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -108,7 +108,7 @@ namespace mongo {
r.process();
}
catch ( AssertionException & e ) {
- log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException while processing op type : " << m.operation() << " to : " << r.getns() << causedBy(e) << endl;
+ LOG( e.isUserAssertion() ? 1 : 0 ) << "AssertionException while processing op type : " << m.operation() << " to : " << r.getns() << causedBy(e) << endl;
le->raiseError( e.getCode() , e.what() );
diff --git a/src/mongo/s/strategy_shard.cpp b/src/mongo/s/strategy_shard.cpp
index 08f2147856e..f7f8d362010 100644
--- a/src/mongo/s/strategy_shard.cpp
+++ b/src/mongo/s/strategy_shard.cpp
@@ -287,7 +287,7 @@ namespace mongo {
// targeting we've done earlier
//
- log( retries == 0 ) << op << " will be retried b/c sharding config info is stale, "
+ LOG( retries == 0 ) << op << " will be retried b/c sharding config info is stale, "
<< " retries: " << retries
<< " ns: " << ns
<< " data: " << query << endl;
diff --git a/src/mongo/s/strategy_single.cpp b/src/mongo/s/strategy_single.cpp
index 442e1d23691..f642274ccff 100644
--- a/src/mongo/s/strategy_single.cpp
+++ b/src/mongo/s/strategy_single.cpp
@@ -189,7 +189,7 @@ namespace mongo {
b.append( "err" , "can't do unlock through mongos" );
}
else {
- log( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
+ LOG( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
return false;
}
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index ec7e1809ea8..4e8b86b0e5d 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -354,7 +354,7 @@ namespace mongo {
else {
// this means that the Scope was killed from a different thread
// for example a cursor got timed out that has a $where clause
- log(3) << "warning: scopeCache is empty!" << endl;
+ LOG(3) << "warning: scopeCache is empty!" << endl;
delete _real;
_real = 0;
}
diff --git a/src/mongo/scripting/engine_v8.cpp b/src/mongo/scripting/engine_v8.cpp
index 5c6a83453b6..d4da7731934 100644
--- a/src/mongo/scripting/engine_v8.cpp
+++ b/src/mongo/scripting/engine_v8.cpp
@@ -310,7 +310,7 @@ namespace mongo {
void gcCallback(GCType type, GCCallbackFlags flags) {
HeapStatistics stats;
V8::GetHeapStatistics( &stats );
- log(1) << "V8 GC heap stats - "
+ LOG(1) << "V8 GC heap stats - "
<< " total: " << stats.total_heap_size()
<< " exec: " << stats.total_heap_size_executable()
<< " used: " << stats.used_heap_size()<< " limit: "
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index 99b06ce2490..ddf38cc6159 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -202,7 +202,7 @@ public:
// skip namespaces with $ in them only if we don't specify a collection to dump
if ( _coll == "" && name.find( ".$" ) != string::npos ) {
- log(1) << "\tskipping collection: " << name << endl;
+ LOG(1) << "\tskipping collection: " << name << endl;
continue;
}
@@ -290,7 +290,7 @@ public:
error() << "offset is 0 for record which should be impossible" << endl;
break;
}
- log(1) << loc << endl;
+ LOG(1) << loc << endl;
Record* rec = loc.rec();
BSONObj obj;
try {
diff --git a/src/mongo/tools/import.cpp b/src/mongo/tools/import.cpp
index 762f7e23bdc..87c3635dee2 100644
--- a/src/mongo/tools/import.cpp
+++ b/src/mongo/tools/import.cpp
@@ -127,7 +127,7 @@ class Import : public Tool {
uassert(16329, str::stream() << "read error, or input line too long (max length: "
<< BUF_SIZE << ")", !(in->rdstate() & ios_base::failbit));
- log(1) << "got line:" << buf << endl;
+ LOG(1) << "got line:" << buf << endl;
}
uassert( 10263 , "unknown error reading file" ,
(!(in->rdstate() & ios_base::badbit)) &&
@@ -349,7 +349,7 @@ public:
return -1;
}
- log(1) << "ns: " << ns << endl;
+ LOG(1) << "ns: " << ns << endl;
auth();
@@ -411,7 +411,7 @@ public:
}
time_t start = time(0);
- log(1) << "filesize: " << fileSize << endl;
+ LOG(1) << "filesize: " << fileSize << endl;
ProgressMeter pm( fileSize );
int num = 0;
int lastNumChecked = num;
diff --git a/src/mongo/tools/restore.cpp b/src/mongo/tools/restore.cpp
index c3e9570dea5..4edca9d7129 100644
--- a/src/mongo/tools/restore.cpp
+++ b/src/mongo/tools/restore.cpp
@@ -177,7 +177,7 @@ public:
}
void drillDown( boost::filesystem::path root, bool use_db, bool use_coll, bool top_level=false ) {
- log(2) << "drillDown: " << root.string() << endl;
+ LOG(2) << "drillDown: " << root.string() << endl;
// skip hidden files and directories
if (root.leaf().string()[0] == '.' && root.leaf().string() != ".")
@@ -475,7 +475,7 @@ private:
}
}
BSONObj o = bo.obj();
- log(0) << "\tCreating index: " << o << endl;
+ LOG(0) << "\tCreating index: " << o << endl;
conn().insert( _curdb + ".system.indexes" , o );
// We're stricter about errors for indexes than for regular data
diff --git a/src/mongo/tools/tool.cpp b/src/mongo/tools/tool.cpp
index 261b28ccf33..bbfc5ac80a3 100644
--- a/src/mongo/tools/tool.cpp
+++ b/src/mongo/tools/tool.cpp
@@ -473,7 +473,7 @@ namespace mongo {
posix_fadvise(fileno(file), 0, fileLength, POSIX_FADV_SEQUENTIAL);
#endif
- log(1) << "\t file size: " << fileLength << endl;
+ LOG(1) << "\t file size: " << fileLength << endl;
unsigned long long read = 0;
unsigned long long num = 0;
diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp
index 44aaaabd5e6..d1e10333372 100644
--- a/src/mongo/unittest/unittest.cpp
+++ b/src/mongo/unittest/unittest.cpp
@@ -113,9 +113,9 @@ namespace mongo {
Result * Suite::run( const std::string& filter, int runsPerTest ) {
- log(1) << "\t about to setupTests" << std::endl;
+ LOG(1) << "\t about to setupTests" << std::endl;
setupTests();
- log(1) << "\t done setupTests" << std::endl;
+ LOG(1) << "\t done setupTests" << std::endl;
Result * r = new Result( _name );
Result::cur = r;
@@ -123,7 +123,7 @@ namespace mongo {
for ( std::vector<TestHolder*>::iterator i=_tests.begin(); i!=_tests.end(); i++ ) {
TestHolder* tc = *i;
if ( filter.size() && tc->getName().find( filter ) == std::string::npos ) {
- log(1) << "\t skipping test: " << tc->getName() << " because doesn't match filter" << std::endl;
+ LOG(1) << "\t skipping test: " << tc->getName() << " because doesn't match filter" << std::endl;
continue;
}
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index f1c89633268..4fca89b5310 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -64,10 +64,10 @@ namespace mongo {
run();
}
catch ( std::exception& e ) {
- log( LL_ERROR ) << "backgroundjob " << name() << "error: " << e.what() << endl;
+ LOG( LL_ERROR ) << "backgroundjob " << name() << "error: " << e.what() << endl;
}
catch(...) {
- log( LL_ERROR ) << "uncaught exception in BackgroundJob " << name() << endl;
+ LOG( LL_ERROR ) << "uncaught exception in BackgroundJob " << name() << endl;
}
{
diff --git a/src/mongo/util/file_allocator.cpp b/src/mongo/util/file_allocator.cpp
index 905029736bf..07a962b42a2 100644
--- a/src/mongo/util/file_allocator.cpp
+++ b/src/mongo/util/file_allocator.cpp
@@ -155,7 +155,7 @@ namespace mongo {
void FileAllocator::ensureLength(int fd , long size) {
#if !defined(_WIN32)
if (useSparseFiles(fd)) {
- log(1) << "using ftruncate to create a sparse file" << endl;
+ LOG(1) << "using ftruncate to create a sparse file" << endl;
int ret = ftruncate(fd, size);
uassert(16063, "ftruncate failed: " + errnoWithDescription(), ret == 0);
return;
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index dfc99783cba..dbdba4674e6 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -1,4 +1,4 @@
-// listen.h
+// listen.cpp
/* Copyright 2009 10gen Inc.
*
@@ -363,7 +363,7 @@ namespace mongo {
int max = (int)(limit.rlim_cur * .8);
- log(1) << "fd limit"
+ LOG(1) << "fd limit"
<< " hard:" << limit.rlim_max
<< " soft:" << limit.rlim_cur
<< " max conn: " << max
@@ -383,7 +383,7 @@ namespace mongo {
if ( current < want ) {
// they want fewer than they can handle
// which is fine
- log(1) << " only allowing " << current << " connections" << endl;
+ LOG(1) << " only allowing " << current << " connections" << endl;
return;
}
if ( current > want ) {
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
index 8148dda841b..478d057b773 100644
--- a/src/mongo/util/net/message_port.cpp
+++ b/src/mongo/util/net/message_port.cpp
@@ -175,7 +175,7 @@ again:
if ( len == 542393671 ) {
// an http GET
- log( psock->getLogLevel() ) << "looks like you're trying to access db over http on native driver port. please add 1000 for webserver" << endl;
+ LOG( psock->getLogLevel() ) << "looks like you're trying to access db over http on native driver port. please add 1000 for webserver" << endl;
string msg = "You are trying to access MongoDB on the native driver port. For http diagnostic access, add 1000 to the port number\n";
stringstream ss;
ss << "HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: text/plain\r\nContent-Length: " << msg.size() << "\r\n\r\n" << msg;
@@ -183,7 +183,7 @@ again:
send( s.c_str(), s.size(), "http" );
return false;
}
- log(0) << "recv(): message len " << len << " is too large" << len << endl;
+ LOG(0) << "recv(): message len " << len << " is too large" << len << endl;
return false;
}
@@ -205,7 +205,7 @@ again:
}
catch ( const SocketException & e ) {
- log(psock->getLogLevel() + (e.shouldPrint() ? 0 : 1) ) << "SocketException: remote: " << remote() << " error: " << e << endl;
+ LOG(psock->getLogLevel() + (e.shouldPrint() ? 0 : 1) ) << "SocketException: remote: " << remote() << " error: " << e << endl;
m.reset();
return false;
}
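A side note on the magic number in the message_port.cpp hunk above: 542393671 is 0x20544547, i.e. the bytes 'G', 'E', 'T', ' ' read as a little-endian 32-bit message length, which is how a plain HTTP GET sent to the native driver port is recognized and answered with the "add 1000 for webserver" hint.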
diff --git a/src/mongo/util/net/miniwebserver.cpp b/src/mongo/util/net/miniwebserver.cpp
index 3888c059914..1e04693337f 100644
--- a/src/mongo/util/net/miniwebserver.cpp
+++ b/src/mongo/util/net/miniwebserver.cpp
@@ -171,7 +171,7 @@ namespace mongo {
psock->close();
}
catch ( SocketException& e ) {
- log(1) << "couldn't send data to http client: " << e << endl;
+ LOG(1) << "couldn't send data to http client: " << e << endl;
}
}
diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp
index abcbfc341a7..392ce21ac91 100644
--- a/src/mongo/util/net/sock.cpp
+++ b/src/mongo/util/net/sock.cpp
@@ -446,7 +446,7 @@ namespace mongo {
_fd = socket(remote.getType(), SOCK_STREAM, 0);
if ( _fd == INVALID_SOCKET ) {
- log(_logLevel) << "ERROR: connect invalid socket " << errnoWithDescription() << endl;
+ LOG(_logLevel) << "ERROR: connect invalid socket " << errnoWithDescription() << endl;
return false;
}
@@ -511,12 +511,12 @@ namespace mongo {
const int mongo_errno = errno;
if ( ( mongo_errno == EAGAIN || mongo_errno == EWOULDBLOCK ) && _timeout != 0 ) {
#endif
- log(_logLevel) << "Socket " << context << " send() timed out " << _remote.toString() << endl;
+ LOG(_logLevel) << "Socket " << context << " send() timed out " << _remote.toString() << endl;
throw SocketException( SocketException::SEND_TIMEOUT , remoteString() );
}
else {
SocketException::Type t = SocketException::SEND_ERROR;
- log(_logLevel) << "Socket " << context << " send() "
+ LOG(_logLevel) << "Socket " << context << " send() "
<< errnoWithDescription(mongo_errno) << ' ' << remoteString() << endl;
throw SocketException( t , remoteString() );
}
@@ -574,11 +574,11 @@ namespace mongo {
int ret = ::sendmsg( _fd , &meta , portSendFlags );
if ( ret == -1 ) {
if ( errno != EAGAIN || _timeout == 0 ) {
- log(_logLevel) << "Socket " << context << " send() " << errnoWithDescription() << ' ' << remoteString() << endl;
+ LOG(_logLevel) << "Socket " << context << " send() " << errnoWithDescription() << ' ' << remoteString() << endl;
throw SocketException( SocketException::SEND_ERROR , remoteString() );
}
else {
- log(_logLevel) << "Socket " << context << " send() remote timeout " << remoteString() << endl;
+ LOG(_logLevel) << "Socket " << context << " send() remote timeout " << remoteString() << endl;
throw SocketException( SocketException::SEND_TIMEOUT , remoteString() );
}
}
@@ -607,13 +607,13 @@ namespace mongo {
int ret = unsafe_recv( buf , len );
if ( ret > 0 ) {
if ( len <= 4 && ret != len )
- log(_logLevel) << "Socket recv() got " << ret << " bytes wanted len=" << len << endl;
+ LOG(_logLevel) << "Socket recv() got " << ret << " bytes wanted len=" << len << endl;
verify( ret <= len );
len -= ret;
buf += ret;
}
else if ( ret == 0 ) {
- log(3) << "Socket recv() conn closed? " << remoteString() << endl;
+ LOG(3) << "Socket recv() conn closed? " << remoteString() << endl;
throw SocketException( SocketException::CLOSED , remoteString() );
}
else { /* ret < 0 */
@@ -635,11 +635,11 @@ namespace mongo {
) && _timeout > 0 )
{
// this is a timeout
- log(_logLevel) << "Socket recv() timeout " << remoteString() <<endl;
+ LOG(_logLevel) << "Socket recv() timeout " << remoteString() <<endl;
throw SocketException( SocketException::RECV_TIMEOUT, remoteString() );
}
- log(_logLevel) << "Socket recv() " << errnoWithDescription(e) << " " << remoteString() <<endl;
+ LOG(_logLevel) << "Socket recv() " << errnoWithDescription(e) << " " << remoteString() <<endl;
throw SocketException( SocketException::RECV_ERROR , remoteString() );
}
}
diff --git a/src/mongo/util/paths.h b/src/mongo/util/paths.h
index e7ca6d5d17c..c75a7e4f737 100644
--- a/src/mongo/util/paths.h
+++ b/src/mongo/util/paths.h
@@ -110,7 +110,7 @@ namespace mongo {
boost::filesystem::path dir = file.branch_path(); // parent_path in new boosts
- log(1) << "flushing directory " << dir.string() << endl;
+ LOG(1) << "flushing directory " << dir.string() << endl;
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(13650, str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: " << errnoWithDescription(), fd >= 0);
diff --git a/src/mongo/util/version.cpp b/src/mongo/util/version.cpp
index 18f2a1b28eb..305592b48b9 100644
--- a/src/mongo/util/version.cpp
+++ b/src/mongo/util/version.cpp
@@ -309,7 +309,7 @@ namespace mongo {
verify( versionCmp("1.2.3-", "1.2.3") < 0 );
verify( versionCmp("1.2.3-pre", "1.2.3") < 0 );
- log(1) << "versionCmpTest passed" << endl;
+ LOG(1) << "versionCmpTest passed" << endl;
}
} versionCmpTest;
@@ -333,7 +333,7 @@ namespace mongo {
verify( _versionArray("1.2.0-rc4-pre-") == BSON_ARRAY(1 << 2 << 0 << -6) );
verify( _versionArray("2.0.0-rc5-pre-") == BSON_ARRAY(2 << 0 << 0 << -5) );
- log(1) << "versionArrayTest passed" << endl;
+ LOG(1) << "versionArrayTest passed" << endl;
}
} versionArrayTest;
}