author    Shaun Verch <shaun.verch@10gen.com>    2012-10-24 10:45:27 -0700
committer Mathias Stearn <redbeard0531@gmail.com>    2013-03-06 17:32:43 -0500
commit    910e3d71fed12bc5731c652b793183c15dbb0977 (patch)
tree      aa29b412939045b15ffd45dc8310d6c63ac0bcdf
parent    0b92639ad548a9262bf7ee19dbf66b7478fe09e8 (diff)
download  mongo-910e3d71fed12bc5731c652b793183c15dbb0977.tar.gz
SERVER-7231 Use LOG(level) macro instead of log(level) function
Conflicts:
	src/mongo/client/distlock.cpp
	src/mongo/db/geo/haystack.cpp
	src/mongo/db/pdfile.cpp
	src/mongo/db/queryoptimizer.cpp
	src/mongo/db/repl/rs.cpp
	src/mongo/s/config.cpp
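Editor's note on the rationale, which the commit message leaves implicit: log(level) is an ordinary function, so every operand of the streaming expression is evaluated before the call can decide to discard the message, while a LOG(level) macro can test the verbosity first and skip the entire statement. The sketch below is an illustrative model of that difference only — it does not reproduce the actual macro in src/mongo/util/log.h at this revision, and the names logLevel/nullStream are stand-ins:

    // Minimal, self-contained model (C++): why LOG(level) beats log(level).
    #include <iostream>
    #include <string>

    static int logLevel = 1;                 // current verbosity knob (assumed name)
    static std::ostream nullStream(nullptr); // ostream with no buffer discards writes

    // Function form: by the time log(level) decides to filter, the caller
    // has already evaluated every operand of the << chain.
    inline std::ostream& log(int level) {
        return level > logLevel ? nullStream : std::cout;
    }

    // Macro form: the level test guards the whole statement, so operands
    // of a suppressed message are never evaluated at all.
    #define LOG(level) \
        if ((level) > logLevel) {} else log(level)

    std::string expensiveToString() {
        std::cout << "[expensiveToString ran]\n";
        return "details";
    }

    int main() {
        log(5) << "function: " << expensiveToString() << '\n'; // side effect still runs
        LOG(5) << "macro: " << expensiveToString() << '\n';    // statement skipped entirely
        return 0;
    }

The if/else shape of such a macro is also what lets every call site below keep its streaming `<< ...` syntax unchanged: each hunk is a pure spelling substitution.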
-rw-r--r-- src/mongo/client/dbclient.cpp 14
-rw-r--r-- src/mongo/client/dbclient_rs.cpp 20
-rw-r--r-- src/mongo/client/distlock.cpp 68
-rw-r--r-- src/mongo/client/distlock_test.cpp 6
-rw-r--r-- src/mongo/client/model.cpp 4
-rw-r--r-- src/mongo/client/parallel.cpp 34
-rw-r--r-- src/mongo/db/btree.cpp 4
-rw-r--r-- src/mongo/db/btreebuilder.cpp 6
-rw-r--r-- src/mongo/db/cloner.cpp 8
-rw-r--r-- src/mongo/db/commands/isself.cpp 12
-rw-r--r-- src/mongo/db/commands/mr.cpp 12
-rw-r--r-- src/mongo/db/db.cpp 8
-rw-r--r-- src/mongo/db/dbcommands.cpp 10
-rw-r--r-- src/mongo/db/dbhelpers.cpp 2
-rw-r--r-- src/mongo/db/dur.cpp 2
-rw-r--r-- src/mongo/db/dur_journal.cpp 2
-rw-r--r-- src/mongo/db/durop.cpp 2
-rw-r--r-- src/mongo/db/extsort.cpp 8
-rw-r--r-- src/mongo/db/geo/haystack.cpp 2
-rw-r--r-- src/mongo/db/index.cpp 6
-rw-r--r-- src/mongo/db/index_update.cpp 12
-rw-r--r-- src/mongo/db/instance.cpp 10
-rw-r--r-- src/mongo/db/oplog.cpp 2
-rw-r--r-- src/mongo/db/pdfile.cpp 20
-rw-r--r-- src/mongo/db/projection.cpp 6
-rw-r--r-- src/mongo/db/queryoptimizer.cpp 2
-rw-r--r-- src/mongo/db/repl.cpp 28
-rw-r--r-- src/mongo/db/repl/bgsync.cpp 2
-rw-r--r-- src/mongo/db/repl/consensus.cpp 2
-rw-r--r-- src/mongo/db/repl/manager.cpp 2
-rw-r--r-- src/mongo/db/repl/rs.cpp 8
-rw-r--r-- src/mongo/db/repl/rs_config.cpp 4
-rw-r--r-- src/mongo/db/repl/rs_rollback.cpp 2
-rw-r--r-- src/mongo/db/repl_block.cpp 2
-rw-r--r-- src/mongo/db/security_commands.cpp 4
-rw-r--r-- src/mongo/db/security_common.cpp 2
-rw-r--r-- src/mongo/dbtests/threadedtests.cpp 44
-rw-r--r-- src/mongo/s/chunk.cpp 4
-rw-r--r-- src/mongo/s/config.cpp 8
-rw-r--r-- src/mongo/s/cursors.cpp 8
-rw-r--r-- src/mongo/s/d_migrate.cpp 2
-rw-r--r-- src/mongo/s/d_split.cpp 6
-rw-r--r-- src/mongo/s/s_only.cpp 2
-rw-r--r-- src/mongo/s/server.cpp 2
-rw-r--r-- src/mongo/s/strategy_shard.cpp 2
-rw-r--r-- src/mongo/s/strategy_single.cpp 2
-rw-r--r-- src/mongo/scripting/engine.cpp 2
-rw-r--r-- src/mongo/scripting/engine_v8.cpp 2
-rw-r--r-- src/mongo/tools/dump.cpp 4
-rw-r--r-- src/mongo/tools/import.cpp 6
-rw-r--r-- src/mongo/tools/restore.cpp 4
-rw-r--r-- src/mongo/tools/tool.cpp 2
-rw-r--r-- src/mongo/unittest/unittest.cpp 6
-rw-r--r-- src/mongo/util/background.cpp 4
-rw-r--r-- src/mongo/util/file_allocator.cpp 2
-rw-r--r-- src/mongo/util/net/listen.cpp 6
-rw-r--r-- src/mongo/util/net/message_port.cpp 6
-rw-r--r-- src/mongo/util/net/miniwebserver.cpp 2
-rw-r--r-- src/mongo/util/net/sock.cpp 18
-rw-r--r-- src/mongo/util/paths.h 2
-rw-r--r-- src/mongo/util/version.cpp 4
61 files changed, 249 insertions(+), 239 deletions(-)
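One idiom the rename preserves, visible below in dbclient_rs.cpp (`LOG( ! verbose )`), instance.cpp (`LOG( found == n )`), and repl.cpp (`LOG(ok)`): the "level" is a bool, which promotes to 0 (always logged) or 1 (logged only at verbosity >= 1). A standalone check of that promotion, in plain standard C++ independent of the MongoDB tree:

    #include <cassert>

    int main() {
        bool verbose = false;
        assert(static_cast<int>(!verbose) == 1); // true -> level 1: verbose-only
        verbose = true;
        assert(static_cast<int>(!verbose) == 0); // false -> level 0: always logged
        return 0;
    }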
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index d0a53303624..9f7a1a48ac7 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -96,12 +96,12 @@ namespace mongo {
case MASTER: {
DBClientConnection * c = new DBClientConnection(true);
c->setSoTimeout( socketTimeout );
- log(1) << "creating new connection to:" << _servers[0] << endl;
+ LOG(1) << "creating new connection to:" << _servers[0] << endl;
if ( ! c->connect( _servers[0] , errmsg ) ) {
delete c;
return 0;
}
- log(1) << "connected connection!" << endl;
+ LOG(1) << "connected connection!" << endl;
return c;
}
@@ -810,22 +810,22 @@ namespace mongo {
throw SocketException( SocketException::FAILED_STATE , toString() );
lastReconnectTry = time(0);
- log(_logLevel) << "trying reconnect to " << _serverString << endl;
+ LOG(_logLevel) << "trying reconnect to " << _serverString << endl;
string errmsg;
_failed = false;
if ( ! _connect(errmsg) ) {
_failed = true;
- log(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
+ LOG(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
throw SocketException( SocketException::CONNECT_ERROR , toString() );
}
- log(_logLevel) << "reconnect " << _serverString << " ok" << endl;
+ LOG(_logLevel) << "reconnect " << _serverString << " ok" << endl;
for( map< string, pair<string,string> >::iterator i = authCache.begin(); i != authCache.end(); i++ ) {
const char *dbname = i->first.c_str();
const char *username = i->second.first.c_str();
const char *password = i->second.second.c_str();
if( !DBClientBase::auth(dbname, username, password, errmsg, false) )
- log(_logLevel) << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
+ LOG(_logLevel) << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
}
}
@@ -1049,7 +1049,7 @@ namespace mongo {
if ( ! runCommand( nsToDatabase( ns.c_str() ) ,
BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << indexName ) ,
info ) ) {
- log(_logLevel) << "dropIndex failed: " << info << endl;
+ LOG(_logLevel) << "dropIndex failed: " << info << endl;
uassert( 10007 , "dropIndex failed" , 0 );
}
resetIndexCache();
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index 1818254cf97..2fd95be1bb9 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -78,7 +78,7 @@ namespace mongo {
const ReplicaSetMonitor::Node& node = nodes[nextNodeIndex];
if (!node.ok) {
- log(2) << "dbclient_rs not selecting " << node << ", not currently ok" << endl;
+ LOG(2) << "dbclient_rs not selecting " << node << ", not currently ok" << endl;
continue;
}
@@ -93,7 +93,7 @@ namespace mongo {
if (node.isLocalSecondary(localThresholdMillis)) {
// found a local node. return early.
- log(2) << "dbclient_rs getSlave found local secondary for queries: "
+ LOG(2) << "dbclient_rs getSlave found local secondary for queries: "
<< nextNodeIndex << ", ping time: " << node.pingTimeMillis << endl;
*lastHost = fallbackHost;
return fallbackHost;
@@ -287,7 +287,7 @@ namespace mongo {
if ( createFromSeed ) {
map<string,vector<HostAndPort> >::const_iterator j = _seedServers.find( name );
if ( j != _seedServers.end() ) {
- log(4) << "Creating ReplicaSetMonitor from cached address" << endl;
+ LOG(4) << "Creating ReplicaSetMonitor from cached address" << endl;
ReplicaSetMonitorPtr& m = _sets[name];
verify( !m );
m.reset( new ReplicaSetMonitor( name, j->second ) );
@@ -339,7 +339,7 @@ namespace mongo {
}
void ReplicaSetMonitor::_remove_inlock( const string& name, bool clearSeedCache ) {
- log(2) << "Removing ReplicaSetMonitor for " << name << " from replica set table" << endl;
+ LOG(2) << "Removing ReplicaSetMonitor for " << name << " from replica set table" << endl;
_sets.erase( name );
if ( clearSeedCache ) {
_seedServers.erase( name );
@@ -464,21 +464,21 @@ namespace mongo {
return fallbackNode;
else if ( _nodes[ _nextSlave ].isLocalSecondary( _localThresholdMillis ) ) {
// found a local slave. return early.
- log(2) << "dbclient_rs getSlave found local secondary for queries: "
+ LOG(2) << "dbclient_rs getSlave found local secondary for queries: "
<< _nextSlave << ", ping time: "
<< _nodes[ _nextSlave ].pingTimeMillis << endl;
return fallbackNode;
}
}
else
- log(2) << "dbclient_rs getSlave not selecting " << _nodes[_nextSlave]
+ LOG(2) << "dbclient_rs getSlave not selecting " << _nodes[_nextSlave]
<< ", not currently okForSecondaryQueries" << endl;
}
}
if ( ! fallbackNode.empty() ) {
// use a non-local secondary, even if local was preferred
- log(1) << "dbclient_rs getSlave falling back to a non-local secondary node" << endl;
+ LOG(1) << "dbclient_rs getSlave falling back to a non-local secondary node" << endl;
return fallbackNode;
}
@@ -487,7 +487,7 @@ namespace mongo {
_master < static_cast<int>(_nodes.size()) && _nodes[_master].ok);
// Fall back to primary
- log(1) << "dbclient_rs getSlave no member in secondary state found, "
+ LOG(1) << "dbclient_rs getSlave no member in secondary state found, "
"returning primary " << _nodes[ _master ] << endl;
return _nodes[_master].addr;
}
@@ -739,7 +739,7 @@ namespace mongo {
node.lastIsMaster = o.copy();
}
- log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << conn->toString()
+ LOG( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << conn->toString()
<< ' ' << o << endl;
// add other nodes
@@ -762,7 +762,7 @@ namespace mongo {
}
catch ( std::exception& e ) {
- log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception "
+ LOG( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception "
<< conn->toString() << ' ' << e.what() << endl;
errorOccured = true;
diff --git a/src/mongo/client/distlock.cpp b/src/mongo/client/distlock.cpp
index bd39bb1296e..7a49e5f3d17 100644
--- a/src/mongo/client/distlock.cpp
+++ b/src/mongo/client/distlock.cpp
@@ -85,14 +85,14 @@ namespace mongo {
string pingId = pingThreadId( addr, process );
- log( DistributedLock::logLvl - 1 ) << "creating distributed lock ping thread for " << addr
+ LOG( DistributedLock::logLvl - 1 ) << "creating distributed lock ping thread for " << addr
<< " and process " << process
<< " (sleeping for " << sleepTime << "ms)" << endl;
static int loops = 0;
while( ! inShutdown() && ! shouldKill( addr, process ) ) {
- log( DistributedLock::logLvl + 2 ) << "distributed lock pinger '" << pingId << "' about to ping." << endl;
+ LOG( DistributedLock::logLvl + 2 ) << "distributed lock pinger '" << pingId << "' about to ping." << endl;
Date_t pingTime;
@@ -155,7 +155,7 @@ namespace mongo {
conn->ensureIndex( DistributedLock::lockPingNS , BSON( "ping" << 1 ) );
}
- log( DistributedLock::logLvl - ( loops % 10 == 0 ? 1 : 0 ) ) << "cluster " << addr << " pinged successfully at " << pingTime
+ LOG( DistributedLock::logLvl - ( loops % 10 == 0 ? 1 : 0 ) ) << "cluster " << addr << " pinged successfully at " << pingTime
<< " by distributed lock pinger '" << pingId
<< "', sleeping for " << sleepTime << "ms" << endl;
@@ -165,7 +165,7 @@ namespace mongo {
int numOldLocks = _oldLockOIDs.size();
if( numOldLocks > 0 )
- log( DistributedLock::logLvl - 1 ) << "trying to delete " << _oldLockOIDs.size() << " old lock entries for process " << process << endl;
+ LOG( DistributedLock::logLvl - 1 ) << "trying to delete " << _oldLockOIDs.size() << " old lock entries for process " << process << endl;
bool removed = false;
for( list<OID>::iterator i = _oldLockOIDs.begin(); i != _oldLockOIDs.end();
@@ -179,11 +179,11 @@ namespace mongo {
// Either the update went through or it didn't, either way we're done trying to
// unlock
- log( DistributedLock::logLvl - 1 ) << "handled late remove of old distributed lock with ts " << *i << endl;
+ LOG( DistributedLock::logLvl - 1 ) << "handled late remove of old distributed lock with ts " << *i << endl;
removed = true;
}
catch( UpdateNotTheSame& ) {
- log( DistributedLock::logLvl - 1 ) << "partially removed old distributed lock with ts " << *i << endl;
+ LOG( DistributedLock::logLvl - 1 ) << "partially removed old distributed lock with ts " << *i << endl;
removed = true;
}
catch ( std::exception& e) {
@@ -194,7 +194,7 @@ namespace mongo {
}
if( numOldLocks > 0 && _oldLockOIDs.size() > 0 ){
- log( DistributedLock::logLvl - 1 ) << "not all old lock entries could be removed for process " << process << endl;
+ LOG( DistributedLock::logLvl - 1 ) << "not all old lock entries could be removed for process " << process << endl;
}
conn.done();
@@ -319,7 +319,7 @@ namespace mongo {
_lockTimeout( lockTimeout == 0 ? LOCK_TIMEOUT : lockTimeout ), _maxClockSkew( _lockTimeout / LOCK_SKEW_FACTOR ), _maxNetSkew( _maxClockSkew ), _lockPing( _maxClockSkew ),
_mutex( "DistributedLock" )
{
- log( logLvl ) << "created new distributed lock for " << name << " on " << conn
+ LOG( logLvl ) << "created new distributed lock for " << name << " on " << conn
<< " ( lock timeout : " << _lockTimeout
<< ", ping interval : " << _lockPing << ", process : " << asProcess << " )" << endl;
@@ -427,7 +427,7 @@ namespace mongo {
// Skew is how much time we'd have to add to local to get to remote
avgSkews[s] += (long long) (remote - local);
- log( logLvl + 1 ) << "skew from remote server " << server << " found: " << (long long) (remote - local) << endl;
+ LOG( logLvl + 1 ) << "skew from remote server " << server << " found: " << (long long) (remote - local) << endl;
}
}
@@ -459,11 +459,11 @@ namespace mongo {
// Make sure our max skew is not more than our pre-set limit
if(totalSkew > (long long) maxClockSkew) {
- log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is out of " << maxClockSkew << "ms bounds." << endl;
+ LOG( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is out of " << maxClockSkew << "ms bounds." << endl;
return false;
}
- log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is in " << maxClockSkew << "ms bounds." << endl;
+ LOG( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is in " << maxClockSkew << "ms bounds." << endl;
return true;
}
@@ -517,7 +517,7 @@ namespace mongo {
// Case 1: No locks
if ( o.isEmpty() ) {
try {
- log( logLvl ) << "inserting initial doc in " << locksNS << " for lock " << _name << endl;
+ LOG( logLvl ) << "inserting initial doc in " << locksNS << " for lock " << _name << endl;
conn->insert( locksNS , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
}
catch ( UserException& e ) {
@@ -532,10 +532,10 @@ namespace mongo {
bool canReenter = reenter && o["process"].String() == _processId && ! distLockPinger.willUnlockOID( o["ts"].OID() ) && o["state"].numberInt() == 2;
if( reenter && ! canReenter ) {
- log( logLvl - 1 ) << "not re-entering distributed lock " << lockName;
- if( o["process"].String() != _processId ) log( logLvl - 1 ) << ", different process " << _processId << endl;
- else if( o["state"].numberInt() == 2 ) log( logLvl - 1 ) << ", state not finalized" << endl;
- else log( logLvl - 1 ) << ", ts " << o["ts"].OID() << " scheduled for late unlock" << endl;
+ LOG( logLvl - 1 ) << "not re-entering distributed lock " << lockName;
+ if( o["process"].String() != _processId ) LOG( logLvl - 1 ) << ", different process " << _processId << endl;
+ else if( o["state"].numberInt() == 2 ) LOG( logLvl - 1 ) << ", state not finalized" << endl;
+ else LOG( logLvl - 1 ) << ", ts " << o["ts"].OID() << " scheduled for late unlock" << endl;
// reset since we've been bounced by a previous lock not being where we thought it was,
// and should go through full forcing process if required.
@@ -546,7 +546,7 @@ namespace mongo {
BSONObj lastPing = conn->findOne( lockPingNS , o["process"].wrap( "_id" ) );
if ( lastPing.isEmpty() ) {
- log( logLvl ) << "empty ping found for process in lock '" << lockName << "'" << endl;
+ LOG( logLvl ) << "empty ping found for process in lock '" << lockName << "'" << endl;
// TODO: Using 0 as a "no time found" value Will fail if dates roll over, but then, so will a lot.
lastPing = BSON( "_id" << o["process"].String() << "ping" << (Date_t) 0 );
}
@@ -555,7 +555,7 @@ namespace mongo {
unsigned long long takeover = _lockTimeout;
PingData _lastPingCheck = getLastPing();
- log( logLvl ) << "checking last ping for lock '" << lockName << "'" << " against process " << _lastPingCheck.id << " and ping " << _lastPingCheck.lastPing << endl;
+ LOG( logLvl ) << "checking last ping for lock '" << lockName << "'" << " against process " << _lastPingCheck.id << " and ping " << _lastPingCheck.lastPing << endl;
try {
@@ -592,17 +592,17 @@ namespace mongo {
}
if ( elapsed <= takeover && ! canReenter ) {
- log( logLvl ) << "could not force lock '" << lockName << "' because elapsed time " << elapsed << " <= takeover time " << takeover << endl;
+ LOG( logLvl ) << "could not force lock '" << lockName << "' because elapsed time " << elapsed << " <= takeover time " << takeover << endl;
*other = o; other->getOwned(); conn.done();
return false;
}
else if( elapsed > takeover && canReenter ) {
- log( logLvl - 1 ) << "not re-entering distributed lock " << lockName << "' because elapsed time " << elapsed << " > takeover time " << takeover << endl;
+ LOG( logLvl - 1 ) << "not re-entering distributed lock " << lockName << "' because elapsed time " << elapsed << " > takeover time " << takeover << endl;
*other = o; other->getOwned(); conn.done();
return false;
}
- log( logLvl - 1 ) << ( canReenter ? "re-entering" : "forcing" ) << " lock '" << lockName << "' because "
+ LOG( logLvl - 1 ) << ( canReenter ? "re-entering" : "forcing" ) << " lock '" << lockName << "' because "
<< ( canReenter ? "re-entering is allowed, " : "" )
<< "elapsed time " << elapsed << " > takeover time " << takeover << endl;
@@ -631,7 +631,7 @@ namespace mongo {
// TODO: Clean up all the extra code to exit this method, probably with a refactor
if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
- ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not force lock '" << lockName << "' "
+ ( errMsg.empty() ? LOG( logLvl - 1 ) : warning() ) << "Could not force lock '" << lockName << "' "
<< ( !errMsg.empty() ? causedBy(errMsg) : string("(another force won)") ) << endl;
*other = o; other->getOwned(); conn.done();
return false;
@@ -673,7 +673,7 @@ namespace mongo {
// TODO: Clean up all the extra code to exit this method, probably with a refactor
if ( ! errMsg.empty() || ! err["n"].type() || err["n"].numberInt() < 1 ) {
- ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not re-enter lock '" << lockName << "' "
+ ( errMsg.empty() ? LOG( logLvl - 1 ) : warning() ) << "Could not re-enter lock '" << lockName << "' "
<< ( !errMsg.empty() ? causedBy(errMsg) : string("(not sure lock is held)") )
<< " gle: " << err
<< endl;
@@ -694,14 +694,14 @@ namespace mongo {
<< lockName << causedBy( e ), 13660);
}
- log( logLvl - 1 ) << "re-entered distributed lock '" << lockName << "'" << endl;
+ LOG( logLvl - 1 ) << "re-entered distributed lock '" << lockName << "'" << endl;
*other = o.getOwned();
conn.done();
return true;
}
- log( logLvl - 1 ) << "lock '" << lockName << "' successfully forced" << endl;
+ LOG( logLvl - 1 ) << "lock '" << lockName << "' successfully forced" << endl;
// We don't need the ts value in the query, since we will only ever replace locks with state=0.
}
@@ -730,7 +730,7 @@ namespace mongo {
// Main codepath to acquire lock
- log( logLvl ) << "about to acquire distributed lock '" << lockName << ":\n"
+ LOG( logLvl ) << "about to acquire distributed lock '" << lockName << ":\n"
<< lockDetails.jsonString(Strict, true) << "\n"
<< query.jsonString(Strict, true) << endl;
@@ -742,7 +742,7 @@ namespace mongo {
currLock = conn->findOne( locksNS , _id );
if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
- ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "could not acquire lock '" << lockName << "' "
+ ( errMsg.empty() ? LOG( logLvl - 1 ) : warning() ) << "could not acquire lock '" << lockName << "' "
<< ( !errMsg.empty() ? causedBy( errMsg ) : string("(another update won)") ) << endl;
*other = currLock;
other->getOwned();
@@ -821,11 +821,11 @@ namespace mongo {
// Locks on all servers are now set and safe until forcing
if ( currLock["ts"] == lockDetails["ts"] ) {
- log( logLvl - 1 ) << "lock update won, completing lock propagation for '" << lockName << "'" << endl;
+ LOG( logLvl - 1 ) << "lock update won, completing lock propagation for '" << lockName << "'" << endl;
gotLock = true;
}
else {
- log( logLvl - 1 ) << "lock update lost, lock '" << lockName << "' not propagated." << endl;
+ LOG( logLvl - 1 ) << "lock update lost, lock '" << lockName << "' not propagated." << endl;
// Register the lock for deletion, to speed up failover
// Not strictly necessary, but helpful
@@ -894,9 +894,9 @@ namespace mongo {
// Log our lock results
if(gotLock)
- log( logLvl - 1 ) << "distributed lock '" << lockName << "' acquired, ts : " << currLock["ts"].OID() << endl;
+ LOG( logLvl - 1 ) << "distributed lock '" << lockName << "' acquired, ts : " << currLock["ts"].OID() << endl;
else
- log( logLvl - 1 ) << "distributed lock '" << lockName << "' was not acquired." << endl;
+ LOG( logLvl - 1 ) << "distributed lock '" << lockName << "' was not acquired." << endl;
conn.done();
@@ -951,12 +951,12 @@ namespace mongo {
continue;
}
- log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked. " << endl;
+ LOG( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked. " << endl;
conn.done();
return;
}
catch( UpdateNotTheSame& ) {
- log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked (messily). " << endl;
+ LOG( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked (messily). " << endl;
conn.done();
break;
}
@@ -972,7 +972,7 @@ namespace mongo {
if( attempted > maxAttempts && ! oldLock.isEmpty() && ! oldLock["ts"].eoo() ) {
- log( logLvl - 1 ) << "could not unlock distributed lock with ts " << oldLock["ts"].OID()
+ LOG( logLvl - 1 ) << "could not unlock distributed lock with ts " << oldLock["ts"].OID()
<< ", will attempt again later" << endl;
// We couldn't unlock the lock at all, so try again later in the pinging thread...
diff --git a/src/mongo/client/distlock_test.cpp b/src/mongo/client/distlock_test.cpp
index 412a80e6e42..0dbe8366cb8 100644
--- a/src/mongo/client/distlock_test.cpp
+++ b/src/mongo/client/distlock_test.cpp
@@ -363,11 +363,11 @@ namespace mongo {
bsonArrToNumVector<long long>(cmdObj["skewHosts"], skew);
}
else {
- log( logLvl ) << "No host clocks to skew." << endl;
+ LOG( logLvl ) << "No host clocks to skew." << endl;
return;
}
- log( logLvl ) << "Skewing clocks of hosts " << cluster << endl;
+ LOG( logLvl ) << "Skewing clocks of hosts " << cluster << endl;
unsigned s = 0;
for(vector<long long>::iterator i = skew.begin(); i != skew.end(); ++i,s++) {
@@ -385,7 +385,7 @@ namespace mongo {
uassert(13678, str::stream() << "Could not communicate with server " << server.toString() << " in cluster " << cluster.toString() << " to change skew by " << *i, success );
- log( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
+ LOG( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
}
catch(...) {
conn->done();
diff --git a/src/mongo/client/model.cpp b/src/mongo/client/model.cpp
index 4b1447f03f8..4a66625da39 100644
--- a/src/mongo/client/model.cpp
+++ b/src/mongo/client/model.cpp
@@ -94,7 +94,7 @@ namespace mongo {
conn->get()->insert( getNS() , o );
_id = o["_id"].wrap().getOwned();
- log(4) << "inserted new model " << getNS() << " " << o << endl;
+ LOG(4) << "inserted new model " << getNS() << " " << o << endl;
}
else {
if ( myId.eoo() ) {
@@ -110,7 +110,7 @@ namespace mongo {
BSONObj q = qb.obj();
BSONObj o = b.obj();
- log(4) << "updated model" << getNS() << " " << q << " " << o << endl;
+ LOG(4) << "updated model" << getNS() << " " << q << " " << o << endl;
conn->get()->update( getNS() , q , o , true );
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 83178dfcb95..d8f9726a45a 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -737,7 +737,7 @@ namespace mongo {
// It's actually okay if we set the version here, since either the
// manager will be verified as compatible, or if the manager doesn't
// exist, we don't care about version consistency
- log( pc ) << "needed to set remote version on connection to value "
+ LOG( pc ) << "needed to set remote version on connection to value "
<< "compatible with " << vinfo << endl;
}
} catch ( const DBException& dbEx ) {
@@ -776,7 +776,7 @@ namespace mongo {
string prefix;
if( _totalTries > 0 ) prefix = str::stream() << "retrying (" << _totalTries << " tries)";
else prefix = "creating";
- log( pc ) << prefix << " pcursor over " << _qSpec << " and " << _cInfo << endl;
+ LOG( pc ) << prefix << " pcursor over " << _qSpec << " and " << _cInfo << endl;
set<Shard> todoStorage;
set<Shard>& todo = todoStorage;
@@ -799,7 +799,8 @@ namespace mongo {
// Close all cursors on extra shards first, as these will be invalid
for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
- log( pc ) << "closing cursor on shard " << i->first << " as the connection is no longer required by " << vinfo << endl;
+ LOG( pc ) << "closing cursor on shard " << i->first
+ << " as the connection is no longer required by " << vinfo << endl;
// Force total cleanup of these connections
if( todo.find( i->first ) == todo.end() ) i->second.cleanup();
@@ -815,7 +816,8 @@ namespace mongo {
verify( todo.size() );
- log( pc ) << "initializing over " << todo.size() << " shards required by " << vinfo << endl;
+ LOG( pc ) << "initializing over " << todo.size()
+ << " shards required by " << vinfo << endl;
// Don't retry indefinitely for whatever reason
_totalTries++;
@@ -826,7 +828,8 @@ namespace mongo {
const Shard& shard = *i;
PCMData& mdata = _cursorMap[ shard ];
- log( pc ) << "initializing on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ LOG( pc ) << "initializing on shard " << shard
+ << ", current connection state is " << mdata.toBSON() << endl;
// This may be the first time connecting to this shard, if so we can get an error here
try {
@@ -951,8 +954,9 @@ namespace mongo {
}
- log( pc ) << "initialized " << ( isCommand() ? "command " : "query " ) << ( lazyInit ? "(lazily) " : "(full) " ) << "on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
-
+ LOG( pc ) << "initialized " << ( isCommand() ? "command " : "query " )
+ << ( lazyInit ? "(lazily) " : "(full) " ) << "on shard " << shard
+ << ", current connection state is " << mdata.toBSON() << endl;
}
catch( StaleConfigException& e ){
@@ -967,7 +971,9 @@ namespace mongo {
_markStaleNS( staleNS, e, forceReload, fullReload );
int logLevel = fullReload ? 0 : 1;
- log( pc + logLevel ) << "stale config of ns " << staleNS << " during initialization, will retry with forced : " << forceReload << ", full : " << fullReload << causedBy( e ) << endl;
+ LOG( pc + logLevel ) << "stale config of ns "
+ << staleNS << " during initialization, will retry with forced : "
+ << forceReload << ", full : " << fullReload << causedBy( e ) << endl;
// This is somewhat strange
if( staleNS != ns )
@@ -1042,14 +1048,15 @@ namespace mongo {
bool retry = false;
map< string, StaleConfigException > staleNSExceptions;
- log( pc ) << "finishing over " << _cursorMap.size() << " shards" << endl;
+ LOG( pc ) << "finishing over " << _cursorMap.size() << " shards" << endl;
for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
const Shard& shard = i->first;
PCMData& mdata = i->second;
- log( pc ) << "finishing on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ LOG( pc ) << "finishing on shard " << shard
+ << ", current connection state is " << mdata.toBSON() << endl;
// Ignore empty conns for now
if( ! mdata.pcState ) continue;
@@ -1100,7 +1107,8 @@ namespace mongo {
// Finalize state
state->cursor->attach( state->conn.get() ); // Closes connection for us
- log( pc ) << "finished on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ LOG( pc ) << "finished on shard " << shard
+ << ", current connection state is " << mdata.toBSON() << endl;
}
}
catch( RecvStaleConfigException& e ){
@@ -1175,7 +1183,9 @@ namespace mongo {
_markStaleNS( staleNS, exception, forceReload, fullReload );
int logLevel = fullReload ? 0 : 1;
- log( pc + logLevel ) << "stale config of ns " << staleNS << " on finishing query, will retry with forced : " << forceReload << ", full : " << fullReload << causedBy( exception ) << endl;
+ LOG( pc + logLevel ) << "stale config of ns "
+ << staleNS << " on finishing query, will retry with forced : "
+ << forceReload << ", full : " << fullReload << causedBy( exception ) << endl;
// This is somewhat strange
if( staleNS != ns )
diff --git a/src/mongo/db/btree.cpp b/src/mongo/db/btree.cpp
index fb30992d572..3038b299eb5 100644
--- a/src/mongo/db/btree.cpp
+++ b/src/mongo/db/btree.cpp
@@ -1709,7 +1709,7 @@ namespace mongo {
if ( found ) {
const _KeyNode& kn = k(pos);
if ( kn.isUnused() ) {
- log(4) << "btree _insert: reusing unused key" << endl;
+ LOG(4) << "btree _insert: reusing unused key" << endl;
c.b = this;
c.pos = pos;
c.op = IndexInsertionContinuation::SetUsed;
@@ -1764,7 +1764,7 @@ namespace mongo {
if ( found ) {
const _KeyNode& kn = k(pos);
if ( kn.isUnused() ) {
- log(4) << "btree _insert: reusing unused key" << endl;
+ LOG(4) << "btree _insert: reusing unused key" << endl;
massert( 10285 , "_insert: reuse key but lchild is not null", lChild.isNull());
massert( 10286 , "_insert: reuse key but rchild is not null", rChild.isNull());
kn.writing().setUsed();
diff --git a/src/mongo/db/btreebuilder.cpp b/src/mongo/db/btreebuilder.cpp
index 5619474ee07..1c09a503348 100644
--- a/src/mongo/db/btreebuilder.cpp
+++ b/src/mongo/db/btreebuilder.cpp
@@ -149,7 +149,7 @@ namespace mongo {
}
if( levels > 1 )
- log(2) << "btree levels: " << levels << endl;
+ LOG(2) << "btree levels: " << levels << endl;
}
/** when all addKeys are done, we then build the higher levels of the tree */
@@ -163,7 +163,7 @@ namespace mongo {
BtreeBuilder<V>::~BtreeBuilder() {
DESTRUCTOR_GUARD(
if( !committed ) {
- log(2) << "Rolling back partially built index space" << endl;
+ LOG(2) << "Rolling back partially built index space" << endl;
DiskLoc x = first;
while( !x.isNull() ) {
DiskLoc next = x.btree<V>()->tempNext();
@@ -173,7 +173,7 @@ namespace mongo {
getDur().commitIfNeeded();
}
verify( idx.head.isNull() );
- log(2) << "done rollback" << endl;
+ LOG(2) << "done rollback" << endl;
}
)
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 8ca08303d85..4be446b5f25 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -379,7 +379,7 @@ namespace mongo {
while ( c->more() ) {
BSONObj collection = c->next();
- log(2) << "\t cloner got " << collection << endl;
+ LOG(2) << "\t cloner got " << collection << endl;
BSONElement e = collection.getField("name");
if ( e.eoo() ) {
@@ -394,7 +394,7 @@ namespace mongo {
/* system.users and s.js is cloned -- but nothing else from system.
* system.indexes is handled specially at the end*/
if( legalClientSystemNS( from_name , true ) == 0 ) {
- log(2) << "\t\t not cloning because system collection" << endl;
+ LOG(2) << "\t\t not cloning because system collection" << endl;
continue;
}
}
@@ -422,7 +422,7 @@ namespace mongo {
dbtempreleaseif r( opts.mayYield );
}
BSONObj collection = *i;
- log(2) << " really will clone: " << collection << endl;
+ LOG(2) << " really will clone: " << collection << endl;
const char * from_name = collection["name"].valuestr();
BSONObj options = collection.getObjectField("options");
@@ -438,7 +438,7 @@ namespace mongo {
/* we defer building id index for performance - building it in batch is much faster */
userCreateNS(toname, options, err, opts.logForRepl, &wantIdIndex);
}
- log(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
+ LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
Query q;
if( opts.snapshot )
q.snapshot();
diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp
index 5bc79869d54..0be270712b6 100644
--- a/src/mongo/db/commands/isself.cpp
+++ b/src/mongo/db/commands/isself.cpp
@@ -73,11 +73,11 @@ namespace mongo {
addrs = NULL;
if (logLevel >= 1) {
- log(1) << "getMyAddrs():";
+ LOG(1) << "getMyAddrs():";
for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
- log(1) << " [" << *it << ']';
+ LOG(1) << " [" << *it << ']';
}
- log(1) << endl;
+ LOG(1) << endl;
}
return out;
@@ -117,11 +117,11 @@ namespace mongo {
freeaddrinfo(addrs);
if (logLevel >= 1) {
- log(1) << "getallIPs(\"" << iporhost << "\"):";
+ LOG(1) << "getallIPs(\"" << iporhost << "\"):";
for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
- log(1) << " [" << *it << ']';
+ LOG(1) << " [" << *it << ']';
}
- log(1) << endl;
+ LOG(1) << endl;
}
return out;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 9c46ddc41ab..b283bca37ef 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -664,7 +664,7 @@ namespace mongo {
}
void State::bailFromJS() {
- log(1) << "M/R: Switching from JS mode to mixed mode" << endl;
+ LOG(1) << "M/R: Switching from JS mode to mixed mode" << endl;
// reduce and reemit into c++
switchMode(false);
@@ -822,7 +822,7 @@ namespace mongo {
{
dbtempreleasecond tl;
if ( ! tl.unlocked() )
- log( LL_WARNING ) << "map/reduce can't temp release" << endl;
+ LOG( LL_WARNING ) << "map/reduce can't temp release" << endl;
// reduce and finalize last array
finalReduce( all );
}
@@ -932,7 +932,7 @@ namespace mongo {
// reduce now to lower mem usage
Timer t;
_scope->invoke(_reduceAll, 0, 0, 0, true);
- log(1) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis() << "ms" << endl;
+ LOG(1) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis() << "ms" << endl;
return;
}
}
@@ -945,12 +945,12 @@ namespace mongo {
long oldSize = _size;
Timer t;
reduceInMemory();
- log(1) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
+ LOG(1) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
// if size is still high, or values are not reducing well, dump
if ( _onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2) ) {
dumpToInc();
- log(1) << " MR - dumping to db" << endl;
+ LOG(1) << " MR - dumping to db" << endl;
}
}
}
@@ -1020,7 +1020,7 @@ namespace mongo {
Config config( dbname , cmd );
- log(1) << "mr ns: " << config.ns << endl;
+ LOG(1) << "mr ns: " << config.ns << endl;
uassert( 16149 , "cannot run map reduce without the js engine", globalScriptEngine );
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index dc19f555caa..e870e3793ed 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -288,7 +288,7 @@ namespace mongo {
static void repairDatabasesAndCheckVersion() {
// LastError * le = lastError.get( true );
Client::GodScope gs;
- log(1) << "enter repairDatabases (to check pdfile version #)" << endl;
+ LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;
//verify(checkNsFilesOnLoad);
checkNsFilesOnLoad = false; // we are mainly just checking the header - don't scan the whole .ns file for every db here.
@@ -298,7 +298,7 @@ namespace mongo {
getDatabaseNames( dbNames );
for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
string dbName = *i;
- log(1) << "\t" << dbName << endl;
+ LOG(1) << "\t" << dbName << endl;
Client::Context ctx( dbName );
MongoDataFile *p = cc().database()->getFile( 0 );
DataFileHeader *h = p->getHeader();
@@ -334,7 +334,7 @@ namespace mongo {
}
}
- log(1) << "done repairDatabases" << endl;
+ LOG(1) << "done repairDatabases" << endl;
if ( shouldRepairDatabases ) {
log() << "finished checking dbs" << endl;
@@ -407,7 +407,7 @@ namespace mongo {
else if( cmdLine.syncdelay == 1 )
log() << "--syncdelay 1" << endl;
else if( cmdLine.syncdelay != 60 )
- log(1) << "--syncdelay " << cmdLine.syncdelay << endl;
+ LOG(1) << "--syncdelay " << cmdLine.syncdelay << endl;
int time_flushing = 0;
while ( ! inShutdown() ) {
_diaglog.flush();
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index d448c9aecb0..8d1eec45eb0 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -937,7 +937,7 @@ namespace mongo {
for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
BSONObj o = *i;
- log(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
+ LOG(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
theDataFileMgr.insertWithObjMod( Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" ).c_str() , o , true );
}
@@ -1547,8 +1547,8 @@ namespace mongo {
DiskLoc extent = nsd->firstExtent;
for( ; excessSize > extent.ext()->length && extent != nsd->lastExtent; extent = extent.ext()->xnext ) {
excessSize -= extent.ext()->length;
- log( 2 ) << "cloneCollectionAsCapped skipping extent of size " << extent.ext()->length << endl;
- log( 6 ) << "excessSize: " << excessSize << endl;
+ LOG( 2 ) << "cloneCollectionAsCapped skipping extent of size " << extent.ext()->length << endl;
+ LOG( 6 ) << "excessSize: " << excessSize << endl;
}
DiskLoc startLoc = extent.ext()->firstRecord;
@@ -1862,7 +1862,7 @@ namespace mongo {
}
}
catch ( SendStaleConfigException& e ){
- log(1) << "command failed because of stale config, can retry" << causedBy( e ) << endl;
+ LOG(1) << "command failed because of stale config, can retry" << causedBy( e ) << endl;
throw;
}
catch ( DBException& e ) {
@@ -1959,7 +1959,7 @@ namespace mongo {
}
if ( c->adminOnly() )
- log( 2 ) << "command: " << cmdObj << endl;
+ LOG( 2 ) << "command: " << cmdObj << endl;
if (c->maintenanceMode() && theReplSet && theReplSet->isSecondary()) {
theReplSet->setMaintenanceMode(true);
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index bb26d132141..85b38a1ff78 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -397,7 +397,7 @@ namespace mongo {
_out = new ofstream();
_out->open( _file.string().c_str() , ios_base::out | ios_base::binary );
if ( ! _out->good() ) {
- log( LL_WARNING ) << "couldn't create file: " << _file.string() << " for remove saving" << endl;
+ LOG( LL_WARNING ) << "couldn't create file: " << _file.string() << " for remove saving" << endl;
delete _out;
_out = 0;
return;
diff --git a/src/mongo/db/dur.cpp b/src/mongo/db/dur.cpp
index 0b6bf376db7..c8b07d47dad 100644
--- a/src/mongo/db/dur.cpp
+++ b/src/mongo/db/dur.cpp
@@ -287,7 +287,7 @@ namespace mongo {
return false;
}
- log(1) << "commitIfNeeded upgrading from shared write to exclusive write state"
+ LOG(1) << "commitIfNeeded upgrading from shared write to exclusive write state"
<< endl;
Lock::DBWrite::UpgradeToExclusive ex;
if (ex.gotUpgrade()) {
diff --git a/src/mongo/db/dur_journal.cpp b/src/mongo/db/dur_journal.cpp
index 9957d415736..291055cd8b1 100644
--- a/src/mongo/db/dur_journal.cpp
+++ b/src/mongo/db/dur_journal.cpp
@@ -222,7 +222,7 @@ namespace mongo {
flushMyDirectory(getJournalDir() / "file"); // flushes parent of argument (in this case journal dir)
- log(1) << "removeJournalFiles end" << endl;
+ LOG(1) << "removeJournalFiles end" << endl;
}
/** at clean shutdown */
diff --git a/src/mongo/db/durop.cpp b/src/mongo/db/durop.cpp
index dae10f0cbbc..f746af84245 100644
--- a/src/mongo/db/durop.cpp
+++ b/src/mongo/db/durop.cpp
@@ -124,7 +124,7 @@ namespace mongo {
boost::filesystem::remove(full);
}
catch(std::exception& e) {
- log(1) << "recover info FileCreateOp::replay unlink " << e.what() << endl;
+ LOG(1) << "recover info FileCreateOp::replay unlink " << e.what() << endl;
}
}
diff --git a/src/mongo/db/extsort.cpp b/src/mongo/db/extsort.cpp
index 982555ce8c1..5142475ea96 100644
--- a/src/mongo/db/extsort.cpp
+++ b/src/mongo/db/extsort.cpp
@@ -81,7 +81,7 @@ namespace mongo {
rootpath << "_tmp/esort." << time(0) << "." << thisUniqueNumber << "/";
_root = rootpath.str();
- log(1) << "external sort root: " << _root.string() << endl;
+ LOG(1) << "external sort root: " << _root.string() << endl;
create_directories( _root );
_compares = 0;
@@ -112,7 +112,7 @@ namespace mongo {
if ( _cur && _files.size() == 0 ) {
_sortInMem();
- log(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
+ LOG(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
return;
}
@@ -146,7 +146,7 @@ namespace mongo {
if ( _cur->hasSpace() == false || _curSizeSoFar > _maxFilesize ) {
finishMap();
- log(1) << "finishing map" << endl;
+ LOG(1) << "finishing map" << endl;
}
}
@@ -186,7 +186,7 @@ namespace mongo {
_files.push_back( file );
out.close();
- log(2) << "Added file: " << file << " with " << num << "objects for external sort" << endl;
+ LOG(2) << "Added file: " << file << " with " << num << "objects for external sort" << endl;
}
// ---------------------------------
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index bcad610494c..c045cf003e9 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -176,7 +176,7 @@ namespace mongo {
Timer t;
- log(1) << "SEARCH near:" << n << " maxDistance:" << maxDistance << " search: " << search << endl;
+ LOG(1) << "SEARCH near:" << n << " maxDistance:" << maxDistance << " search: " << search << endl;
int x,y;
{
BSONObjIterator i( n );
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index 274c3aa37d9..768294861c5 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -206,7 +206,7 @@ namespace mongo {
dropNS(ns.c_str());
}
catch(DBException& ) {
- log(2) << "IndexDetails::kill(): couldn't drop ns " << ns << endl;
+ LOG(2) << "IndexDetails::kill(): couldn't drop ns " << ns << endl;
}
head.setInvalid();
info.setInvalid();
@@ -317,7 +317,7 @@ namespace mongo {
}
if ( sourceNS.empty() || key.isEmpty() ) {
- log(2) << "bad add index attempt name:" << (name?name:"") << "\n ns:" <<
+ LOG(2) << "bad add index attempt name:" << (name?name:"") << "\n ns:" <<
sourceNS << "\n idxobj:" << io.toString() << endl;
string s = "bad add index attempt " + sourceNS + " key:" + key.toString();
uasserted(12504, s);
@@ -341,7 +341,7 @@ namespace mongo {
return false;
}
if( sourceCollection->findIndexByKeyPattern(key) >= 0 ) {
- log(2) << "index already exists with diff name " << name << ' ' << key.toString() << endl;
+ LOG(2) << "index already exists with diff name " << name << ' ' << key.toString() << endl;
return false;
}
diff --git a/src/mongo/db/index_update.cpp b/src/mongo/db/index_update.cpp
index e8633cf3734..7f52e6a30f0 100644
--- a/src/mongo/db/index_update.cpp
+++ b/src/mongo/db/index_update.cpp
@@ -170,7 +170,7 @@ namespace mongo {
_unindexRecord(d->idx(j), obj, loc, false);
}
catch(...) {
- log(3) << "unindex fails on rollback after unique key constraint prevented insert\n";
+ LOG(3) << "unindex fails on rollback after unique key constraint prevented insert\n";
}
}
throw;
@@ -262,7 +262,7 @@ namespace mongo {
}
pm.finished();
op->setMessage( "index: (3/3) btree-middle" );
- log(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
+ LOG(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
btBuilder.commit();
if ( btBuilder.getn() != phase1->nkeys && ! dropDups ) {
warning() << "not all entries were added to the index, probably some keys were too large" << endl;
@@ -320,7 +320,7 @@ namespace mongo {
phase1->sorter->sort();
if ( logLevel > 1 ) printMemInfo( "after final sort" );
- log(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
+ LOG(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
set<DiskLoc> dupsToDrop;
@@ -530,7 +530,7 @@ namespace mongo {
_unindexRecord(d->idx(j), obj, loc, false);
}
catch(...) {
- log(3) << "unindex fails on rollback after unique failure\n";
+ LOG(3) << "unindex fails on rollback after unique failure\n";
}
}
throw;
@@ -604,7 +604,7 @@ namespace mongo {
// delete a specific index or all?
if ( *name == '*' && name[1] == 0 ) {
- log(4) << " d->nIndexes was " << d->nIndexes << '\n';
+ LOG(4) << " d->nIndexes was " << d->nIndexes << '\n';
anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
IndexDetails *idIndex = 0;
if( d->nIndexes ) {
@@ -633,7 +633,7 @@ namespace mongo {
// delete just one index
int x = d->findIndexByName(name);
if ( x >= 0 ) {
- log(4) << " d->nIndexes was " << d->nIndexes << endl;
+ LOG(4) << " d->nIndexes was " << d->nIndexes << endl;
anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
/* note it is important we remove the IndexDetails with this
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index edbef6e882e..41382a22e08 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -470,10 +470,10 @@ namespace mongo {
if ( currentOp.shouldDBProfile( debug.executionTime ) ) {
// performance profiling is on
if ( Lock::isReadLocked() ) {
- mongo::log(1) << "note: not profiling because recursive read lock" << endl;
+ LOG(1) << "note: not profiling because recursive read lock" << endl;
}
else if ( lockedForWriting() ) {
- mongo::log(1) << "note: not profiling because doing fsync+lock" << endl;
+ LOG(1) << "note: not profiling because doing fsync+lock" << endl;
}
else {
profile(c, op, currentOp);
@@ -493,14 +493,14 @@ namespace mongo {
uassert( 13004 , str::stream() << "sent negative cursors to kill: " << n , n >= 1 );
if ( n > 2000 ) {
- log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
+ LOG( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
verify( n < 30000 );
}
int found = ClientCursor::erase(n, (long long *) x);
if ( logLevel > 0 || found != n ) {
- log( found == n ) << "killcursors: found " << found << " of " << n << endl;
+ LOG( found == n ) << "killcursors: found " << found << " of " << n << endl;
}
}
@@ -1064,7 +1064,7 @@ namespace mongo {
#endif
// block the dur thread from doing any work for the rest of the run
- log(2) << "shutdown: groupCommitMutex" << endl;
+ LOG(2) << "shutdown: groupCommitMutex" << endl;
SimpleMutex::scoped_lock lk(dur::commitJob.groupCommitMutex);
#ifdef _WIN32
diff --git a/src/mongo/db/oplog.cpp b/src/mongo/db/oplog.cpp
index 1cd06a96d80..62766447f3a 100644
--- a/src/mongo/db/oplog.cpp
+++ b/src/mongo/db/oplog.cpp
@@ -224,7 +224,7 @@ namespace mongo {
append_O_Obj(r->data(), partial, obj);
if ( logLevel >= 6 ) {
- log( 6 ) << "logOp:" << BSONObj::make(r) << endl;
+ LOG( 6 ) << "logOp:" << BSONObj::make(r) << endl;
}
}
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 6719b73cc5e..db0372608f8 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -166,7 +166,7 @@ namespace mongo {
void ensureIdIndexForNewNs(const char *ns) {
if ( ( strstr( ns, ".system." ) == 0 || legalClientSystemNS( ns , false ) ) &&
strstr( ns, FREELIST_NS ) == 0 ) {
- log( 1 ) << "adding _id index for collection " << ns << endl;
+ LOG( 1 ) << "adding _id index for collection " << ns << endl;
ensureHaveIdIndex( ns );
}
}
@@ -236,7 +236,7 @@ namespace mongo {
}
bool _userCreateNS(const char *ns, const BSONObj& options, string& err, bool *deferIdIndex) {
- log(1) << "create collection " << ns << ' ' << options << endl;
+ LOG(1) << "create collection " << ns << ' ' << options << endl;
if ( nsdetails(ns) ) {
err = "collection already exists";
@@ -605,7 +605,7 @@ namespace mongo {
}
}
- if( n > 128 ) log( n < 512 ) << "warning: newExtent " << n << " scanned\n";
+ if( n > 128 ) LOG( n < 512 ) << "warning: newExtent " << n << " scanned\n";
if( best ) {
Extent *e = best;
@@ -978,7 +978,7 @@ namespace mongo {
}
void dropCollection( const string &name, string &errmsg, BSONObjBuilder &result ) {
- log(1) << "dropCollection: " << name << endl;
+ LOG(1) << "dropCollection: " << name << endl;
NamespaceDetails *d = nsdetails(name.c_str());
if( d == 0 )
return;
@@ -997,7 +997,7 @@ namespace mongo {
}
verify( d->nIndexes == 0 );
}
- log(1) << "\t dropIndexes done" << endl;
+ LOG(1) << "\t dropIndexes done" << endl;
result.append("ns", name.c_str());
ClientCursor::invalidate(name.c_str());
Top::global.collectionDropped( name );
@@ -1324,7 +1324,7 @@ namespace mongo {
NOINLINE_DECL DiskLoc outOfSpace(const char *ns, NamespaceDetails *d, int lenWHdr, bool god, DiskLoc extentLoc) {
DiskLoc loc;
if ( ! d->isCapped() ) { // size capped doesn't grow
- log(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor() << " lenWHdr: " << lenWHdr << endl;
+ LOG(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor() << " lenWHdr: " << lenWHdr << endl;
cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false, !god);
loc = d->alloc(ns, lenWHdr, extentLoc);
if ( loc.isNull() ) {
@@ -1687,7 +1687,7 @@ namespace mongo {
}
void dropDatabase(string db) {
- log(1) << "dropDatabase " << db << endl;
+ LOG(1) << "dropDatabase " << db << endl;
Lock::assertWriteLocked(db);
Database *d = cc().database();
verify( d );
@@ -1903,7 +1903,7 @@ namespace mongo {
bool ok = false;
MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply( q ) );
if ( ok )
- log(2) << fo.op() << " file " << q.string() << endl;
+ LOG(2) << fo.op() << " file " << q.string() << endl;
int i = 0;
int extra = 10; // should not be necessary, this is defensive in case there are missing files
while ( 1 ) {
@@ -1914,7 +1914,7 @@ namespace mongo {
MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply(q) );
if ( ok ) {
if ( extra != 10 ) {
- log(1) << fo.op() << " file " << q.string() << endl;
+ LOG(1) << fo.op() << " file " << q.string() << endl;
log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
}
}
@@ -1947,7 +1947,7 @@ namespace mongo {
int nNotClosed = 0;
for( set< string >::iterator i = dbs.begin(); i != dbs.end(); ++i ) {
string name = *i;
- log(2) << "DatabaseHolder::closeAll path:" << path << " name:" << name << endl;
+ LOG(2) << "DatabaseHolder::closeAll path:" << path << " name:" << name << endl;
Client::Context ctx( name , path );
if( !force && BackgroundOperation::inProgForDb(name.c_str()) ) {
log() << "WARNING: can't close database " << name << " because a bg job is in progress - try killOp command" << endl;
diff --git a/src/mongo/db/projection.cpp b/src/mongo/db/projection.cpp
index 6c598f6daa7..9c98166fcc3 100644
--- a/src/mongo/db/projection.cpp
+++ b/src/mongo/db/projection.cpp
@@ -176,7 +176,7 @@ namespace mongo {
MatchDetails arrayDetails;
arrayDetails.requestElemMatchKey();
if ( matcher->second->matches( in, &arrayDetails ) ) {
- log(4) << "Matched array on field: " << matcher->first << endl
+ LOG(4) << "Matched array on field: " << matcher->first << endl
<< " from array: " << in.getField( matcher->first ) << endl
<< " in object: " << in << endl
<< " at position: " << arrayDetails.elemMatchKey() << endl;
@@ -282,7 +282,7 @@ namespace mongo {
if ( details && arrayOpType == ARRAY_OP_POSITIONAL ) {
// $ positional operator specified
- log(4) << "projection: checking if element " << e << " matched spec: "
+ LOG(4) << "projection: checking if element " << e << " matched spec: "
<< getSpec() << " match details: " << *details << endl;
uassert( 16352, mongoutils::str::stream() << "positional operator ("
<< e.fieldName()
@@ -333,7 +333,7 @@ namespace mongo {
mongoutils::str::before( projectionElement.fieldName(), "." ) ) {
// found query spec that matches positional array projection spec
- log(4) << "Query specifies field named for positional operator: "
+ LOG(4) << "Query specifies field named for positional operator: "
<< queryElement.fieldName() << endl;
return;
}
diff --git a/src/mongo/db/queryoptimizer.cpp b/src/mongo/db/queryoptimizer.cpp
index 40af04ce450..71d8c26c771 100644
--- a/src/mongo/db/queryoptimizer.cpp
+++ b/src/mongo/db/queryoptimizer.cpp
@@ -1025,7 +1025,7 @@ doneCheckOrder:
massert( 10369 , "no plans", _plans._plans.size() > 0 );
if ( _plans._plans.size() > 1 )
- log(1) << " running multiple plans" << endl;
+ LOG(1) << " running multiple plans" << endl;
for( PlanSet::iterator i = _plans._plans.begin(); i != _plans._plans.end(); ++i ) {
shared_ptr<QueryOp> op( _op.createChild() );
op->setQueryPlan( i->get() );
diff --git a/src/mongo/db/repl.cpp b/src/mongo/db/repl.cpp
index c73f7979be5..31df40698b9 100644
--- a/src/mongo/db/repl.cpp
+++ b/src/mongo/db/repl.cpp
@@ -336,7 +336,7 @@ namespace mongo {
BSONObj pattern = b.done();
BSONObj o = jsobj();
- log( 1 ) << "Saving repl source: " << o << endl;
+ LOG( 1 ) << "Saving repl source: " << o << endl;
{
OpDebug debug;
@@ -648,7 +648,7 @@ namespace mongo {
*/
void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op, bool alreadyLocked) {
if( logLevel >= 6 ) // op.tostring is expensive so doing this check explicitly
- log(6) << "processing op: " << op << endl;
+ LOG(6) << "processing op: " << op << endl;
if( op.getStringField("op")[0] == 'n' )
return;
@@ -730,7 +730,7 @@ namespace mongo {
bool incompleteClone = incompleteCloneDbs.count( clientName ) != 0;
if( logLevel >= 6 )
- log(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
+ LOG(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
// always apply admin command command
// this is a bit hacky -- the semantics of replication/commands aren't well specified
@@ -794,7 +794,7 @@ namespace mongo {
int ReplSource::sync_pullOpLog(int& nApplied) {
int okResultCode = 1;
string ns = string("local.oplog.$") + sourceName();
- log(2) << "repl: sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
+ LOG(2) << "repl: sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
bool tailing = true;
oplogReader.tailCheck();
@@ -817,7 +817,7 @@ namespace mongo {
if ( !e.embeddedObject().getBoolField( "empty" ) ) {
if ( name != "local" ) {
if ( only.empty() || only == name ) {
- log( 2 ) << "adding to 'addDbNextPass': " << name << endl;
+ LOG( 2 ) << "adding to 'addDbNextPass': " << name << endl;
addDbNextPass.insert( name );
}
}
@@ -845,7 +845,7 @@ namespace mongo {
tailing = false;
}
else {
- log(2) << "repl: tailing=true\n";
+ LOG(2) << "repl: tailing=true\n";
}
if( !oplogReader.haveCursor() ) {
@@ -868,7 +868,7 @@ namespace mongo {
if ( !oplogReader.more() ) {
if ( tailing ) {
- log(2) << "repl: tailing & no new activity\n";
+ LOG(2) << "repl: tailing & no new activity\n";
if( oplogReader.awaitCapable() )
okResultCode = 0; // don't sleep
@@ -907,9 +907,9 @@ namespace mongo {
}
nextOpTime = OpTime( ts.date() );
- log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
+ LOG(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
if ( initial ) {
- log(1) << "repl: initial run\n";
+ LOG(1) << "repl: initial run\n";
}
if( tailing ) {
if( !( syncedTo < nextOpTime ) ) {
@@ -1125,7 +1125,7 @@ namespace mongo {
BSONObj res;
bool ok = conn->runCommand( "admin" , cmd.obj() , res );
// ignoring for now on purpose for older versions
- log(ok) << "replHandshake res not: " << ok << " res: " << res << endl;
+ LOG(ok) << "replHandshake res not: " << ok << " res: " << res << endl;
return true;
}
@@ -1231,7 +1231,7 @@ namespace mongo {
}
if ( !oplogReader.connect(hostName) ) {
- log(4) << "repl: can't connect to sync source" << endl;
+ LOG(4) << "repl: can't connect to sync source" << endl;
return -1;
}
@@ -1411,7 +1411,7 @@ namespace mongo {
}
}
else {
- log(5) << "couldn't logKeepalive" << endl;
+ LOG(5) << "couldn't logKeepalive" << endl;
toSleep = 1;
}
}
@@ -1482,12 +1482,12 @@ namespace mongo {
if ( replSettings.slave ) {
verify( replSettings.slave == SimpleSlave );
- log(1) << "slave=true" << endl;
+ LOG(1) << "slave=true" << endl;
boost::thread repl_thread(replSlaveThread);
}
if ( replSettings.master ) {
- log(1) << "master=true" << endl;
+ LOG(1) << "master=true" << endl;
replSettings.master = true;
createOplog();
boost::thread t(replMasterThread);
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index c420cae4fc4..c768f706729 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -415,7 +415,7 @@ namespace replset {
string current = target->fullName();
if (!r.connect(current)) {
- log(2) << "replSet can't connect to " << current << " to read operations" << rsLog;
+ LOG(2) << "replSet can't connect to " << current << " to read operations" << rsLog;
r.resetConnection();
theReplSet->veto(current);
continue;
diff --git a/src/mongo/db/repl/consensus.cpp b/src/mongo/db/repl/consensus.cpp
index d0e5de8687c..9d0f7b8c20e 100644
--- a/src/mongo/db/repl/consensus.cpp
+++ b/src/mongo/db/repl/consensus.cpp
@@ -415,7 +415,7 @@ namespace mongo {
}
else {
/* succeeded. */
- log(1) << "replSet election succeeded, assuming primary role" << rsLog;
+ LOG(1) << "replSet election succeeded, assuming primary role" << rsLog;
success = true;
rs.assumePrimary();
}
diff --git a/src/mongo/db/repl/manager.cpp b/src/mongo/db/repl/manager.cpp
index a35d69da544..72452322e08 100644
--- a/src/mongo/db/repl/manager.cpp
+++ b/src/mongo/db/repl/manager.cpp
@@ -252,7 +252,7 @@ namespace mongo {
int ll = 0;
if( ++n > 5 ) ll++;
if( last + 60 > time(0 ) ) ll++;
- log(ll) << "replSet can't see a majority, will not try to elect self" << rsLog;
+ LOG(ll) << "replSet can't see a majority, will not try to elect self" << rsLog;
last = time(0);
return;
}
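The manager.cpp hunk above passes a computed level rather than a constant: the message starts at level 0 and is demoted as it repeats. A self-contained sketch of that demotion idiom, reusing the illustrative guard macro from the earlier sketch (all identifiers here are assumptions, not the real tree's):

    #include <ctime>
    #include <iostream>

    static int g_logLevel = 0;
    #define LOG(level) if ((level) > g_logLevel) {} else std::cout

    int main() {
        int n = 0;
        time_t last = 0;
        for (int i = 0; i < 8; ++i) {
            int ll = 0;                      // first occurrence: level 0
            if (++n > 5) ll++;               // repeated often: demote once
            if (last + 60 > time(0)) ll++;   // repeated within 60s: demote again
            LOG(ll) << "can't see a majority (attempt " << n << ")\n";
            last = time(0);
        }                                    // only the first attempt prints
    }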
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
index 0469e733128..713dc622629 100644
--- a/src/mongo/db/repl/rs.cpp
+++ b/src/mongo/db/repl/rs.cpp
@@ -73,7 +73,7 @@ namespace mongo {
}
if( !s.empty() ) {
lastLogged = _hbmsgTime;
- log(logLevel) << "replSet " << s << rsLog;
+ LOG(logLevel) << "replSet " << s << rsLog;
}
}
@@ -353,7 +353,7 @@ namespace mongo {
seedSet.insert(m);
//uassert(13101, "can't use localhost in replset host list", !m.isLocalHost());
if( m.isSelf() ) {
- log(1) << "replSet ignoring seed " << m.toString() << " (=self)" << rsLog;
+ LOG(1) << "replSet ignoring seed " << m.toString() << " (=self)" << rsLog;
}
else
seeds.push_back(m);
@@ -658,7 +658,7 @@ namespace mongo {
int n = 0;
for( vector<ReplSetConfig>::iterator i = cfgs.begin(); i != cfgs.end(); i++ ) {
ReplSetConfig& cfg = *i;
- DEV log(1) << n+1 << " config shows version " << cfg.version << rsLog;
+ DEV LOG(1) << n+1 << " config shows version " << cfg.version << rsLog;
if( ++n == 1 ) myVersion = cfg.version;
if( cfg.ok() && cfg.version > v ) {
highest = &cfg;
@@ -718,7 +718,7 @@ namespace mongo {
configs.push_back( ReplSetConfig(HostAndPort(*i)) );
}
catch( DBException& ) {
- log(1) << "replSet exception trying to load config from discovered seed " << *i << rsLog;
+ LOG(1) << "replSet exception trying to load config from discovered seed " << *i << rsLog;
replSettings.discoveredSeeds.erase(*i);
}
}
diff --git a/src/mongo/db/repl/rs_config.cpp b/src/mongo/db/repl/rs_config.cpp
index 8d1f47c4f1e..27e54cd5dae 100644
--- a/src/mongo/db/repl/rs_config.cpp
+++ b/src/mongo/db/repl/rs_config.cpp
@@ -674,14 +674,14 @@ namespace mongo {
}
catch( DBException& e) {
version = v;
- log(level) << "replSet load config couldn't get from " << h.toString() << ' ' << e.what() << rsLog;
+ LOG(level) << "replSet load config couldn't get from " << h.toString() << ' ' << e.what() << rsLog;
return;
}
from(cfg);
checkRsConfig();
_ok = true;
- log(level) << "replSet load config ok from " << (h.isSelf() ? "self" : h.toString()) << rsLog;
+ LOG(level) << "replSet load config ok from " << (h.isSelf() ? "self" : h.toString()) << rsLog;
_constructed = true;
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index c912116f9d5..043845873d5 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -430,7 +430,7 @@ namespace mongo {
try {
bob res;
string errmsg;
- log(1) << "replSet rollback drop: " << *i << rsLog;
+ LOG(1) << "replSet rollback drop: " << *i << rsLog;
dropCollection(*i, errmsg, res);
}
catch(...) {
diff --git a/src/mongo/db/repl_block.cpp b/src/mongo/db/repl_block.cpp
index 42d03bb9f94..c312bfab51e 100644
--- a/src/mongo/db/repl_block.cpp
+++ b/src/mongo/db/repl_block.cpp
@@ -243,7 +243,7 @@ namespace mongo {
if (theReplSet && !theReplSet->isPrimary()) {
// we don't know the slave's port, so we make the replica set keep
// a map of rids to slaves
- log(2) << "percolating " << lastOp.toString() << " from " << rid << endl;
+ LOG(2) << "percolating " << lastOp.toString() << " from " << rid << endl;
theReplSet->ghost->send( boost::bind(&GhostSync::percolate, theReplSet->ghost, rid, lastOp) );
}
}
diff --git a/src/mongo/db/security_commands.cpp b/src/mongo/db/security_commands.cpp
index 6dbbe3dabec..00a18d06c8e 100644
--- a/src/mongo/db/security_commands.cpp
+++ b/src/mongo/db/security_commands.cpp
@@ -91,12 +91,12 @@ namespace mongo {
scoped_ptr<nonce64> ln(lastNonce.release());
if ( !ln ) {
reject = true;
- log(1) << "auth: no lastNonce" << endl;
+ LOG(1) << "auth: no lastNonce" << endl;
}
else {
digestBuilder << hex << *ln;
reject = digestBuilder.str() != received_nonce;
- if ( reject ) log(1) << "auth: different lastNonce" << endl;
+ if ( reject ) LOG(1) << "auth: different lastNonce" << endl;
}
if ( reject ) {
diff --git a/src/mongo/db/security_common.cpp b/src/mongo/db/security_common.cpp
index 3740d7562e0..bf0628fd0a7 100644
--- a/src/mongo/db/security_common.cpp
+++ b/src/mongo/db/security_common.cpp
@@ -107,7 +107,7 @@ namespace mongo {
return false;
}
- log(1) << "security key: " << str << endl;
+ LOG(1) << "security key: " << str << endl;
// createPWDigest should really not be a member func
DBClientConnection conn;
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index d8965e46383..9e135cfcc67 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -618,15 +618,15 @@ namespace ThreadedTests {
sleepmillis(100*x);
int Z = 1;
- log(Z) << x << ' ' << what[x] << " request" << endl;
+ LOG(Z) << x << ' ' << what[x] << " request" << endl;
char ch = what[x];
switch( ch ) {
case 'w':
{
m.lock();
- log(Z) << x << " w got" << endl;
+ LOG(Z) << x << " w got" << endl;
sleepmillis(100);
- log(Z) << x << " w unlock" << endl;
+ LOG(Z) << x << " w unlock" << endl;
m.unlock();
}
break;
@@ -635,7 +635,7 @@ namespace ThreadedTests {
{
Timer t;
RWLock::Upgradable u(m);
- log(Z) << x << ' ' << ch << " got" << endl;
+ LOG(Z) << x << ' ' << ch << " got" << endl;
if( ch == 'U' ) {
#ifdef MONGO_USE_SRW_ON_WINDOWS
// SRW locks are neither fair nor FIFO, as per docs
@@ -654,7 +654,7 @@ namespace ThreadedTests {
}
}
sleepsecs(1);
- log(Z) << x << ' ' << ch << " unlock" << endl;
+ LOG(Z) << x << ' ' << ch << " unlock" << endl;
}
break;
case 'r':
@@ -662,7 +662,7 @@ namespace ThreadedTests {
{
Timer t;
m.lock_shared();
- log(Z) << x << ' ' << ch << " got " << endl;
+ LOG(Z) << x << ' ' << ch << " got " << endl;
if( what[x] == 'R' ) {
if( t.millis() > 15 ) {
// commented out for less chatter, we aren't using upgradeable anyway right now:
@@ -670,7 +670,7 @@ namespace ThreadedTests {
}
}
sleepmillis(200);
- log(Z) << x << ' ' << ch << " unlock" << endl;
+ LOG(Z) << x << ' ' << ch << " unlock" << endl;
m.unlock_shared();
}
break;
@@ -822,24 +822,24 @@ namespace ThreadedTests {
int Z = 0;
Client::initThread("utest");
if( x == 1 ) {
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1" << endl;
rwlock_shared lk(m);
sleepmillis(300);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
}
if( x == 2 ) {
sleepmillis(100);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 2" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2" << endl;
rwlock lk(m, true);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
}
if( x == 3 ) {
sleepmillis(200);
Timer t;
- log(Z) << mongo::curTimeMillis64() % 10000 << " 3" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3" << endl;
rwlock_shared lk(m);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
- log(Z) << t.millis() << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
+ LOG(Z) << t.millis() << endl;
ASSERT( t.millis() > 50 );
}
cc().shutdown();
@@ -859,18 +859,18 @@ namespace ThreadedTests {
int Z = 0;
Client::initThread("qtest");
if( x == 1 ) {
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1 lock_r()..." << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1 lock_r()..." << endl;
m.lock_r();
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1 got" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1 got" << endl;
sleepmillis(300);
m.unlock_r();
- log(Z) << mongo::curTimeMillis64() % 10000 << " 1 unlock_r()" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 1 unlock_r()" << endl;
}
if( x == 2 || x == 4 ) {
sleepmillis(x*50);
- log(Z) << mongo::curTimeMillis64() % 10000 << " 2 lock_W()..." << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2 lock_W()..." << endl;
m.lock_W();
- log(Z) << mongo::curTimeMillis64() % 10000 << " 2 got" << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 2 got" << endl;
gotW = true;
m.unlock_W();
}
@@ -878,12 +878,12 @@ namespace ThreadedTests {
sleepmillis(200);
Timer t;
- log(Z) << mongo::curTimeMillis64() % 10000 << " 3 lock_r()..." << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3 lock_r()..." << endl;
m.lock_r();
verify( gotW );
- log(Z) << mongo::curTimeMillis64() % 10000 << " 3 got" << gotW << endl;
+ LOG(Z) << mongo::curTimeMillis64() % 10000 << " 3 got" << gotW << endl;
m.unlock_r();
- log(Z) << t.millis() << endl;
+ LOG(Z) << t.millis() << endl;
ASSERT( t.millis() > 50 );
}
cc().shutdown();
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 69a49031f82..23abff1a0de 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -331,7 +331,7 @@ namespace mongo {
);
fromconn->done();
- log( worked ) << "moveChunk result: " << res << endl;
+ LOG( worked ) << "moveChunk result: " << res << endl;
// if succeeded, needs to reload to pick up the new location
// if failed, mongos may be stale
@@ -1327,7 +1327,7 @@ namespace mongo {
}
catch (...) {
- log( LL_ERROR ) << "\t invalid ChunkRangeMap! printing ranges:" << endl;
+ LOG( LL_ERROR ) << "\t invalid ChunkRangeMap! printing ranges:" << endl;
for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it)
cout << it->first << ": " << *it->second << endl;
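Several call sites pass a bool as the level: LOG(ok) in the replHandshake hunk and LOG( worked ) in the moveChunk hunk above. false converts to 0 (always logged) and true to 1 (debug-only), so failures stay visible while successes become optional chatter. A minimal sketch under the same assumed guard macro:

    #include <iostream>

    static int g_logLevel = 0;
    #define LOG(level) if ((level) > g_logLevel) {} else std::cout

    int main() {
        bool worked = false;                         // failure -> level 0
        LOG(worked) << "moveChunk result: failed\n"; // printed
        worked = true;                               // success -> level 1
        LOG(worked) << "moveChunk result: ok\n";     // suppressed by default
    }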
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 8c8a2d2fbbf..ca28ca0c004 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -809,7 +809,7 @@ namespace mongo {
}
if ( up == 1 ) {
- log( LL_WARNING ) << "only 1 config server reachable, continuing" << endl;
+ LOG( LL_WARNING ) << "only 1 config server reachable, continuing" << endl;
return true;
}
@@ -829,7 +829,7 @@ namespace mongo {
stringstream ss;
ss << "config servers " << _config[firstGood] << " and " << _config[i] << " differ";
- log( LL_WARNING ) << ss.str();
+ LOG( LL_WARNING ) << ss.str() << endl;
if ( tries <= 1 ) {
ss << "\n" << c1 << "\t" << c2 << "\n" << d1 << "\t" << d2;
errmsg = ss.str();
@@ -849,7 +849,7 @@ namespace mongo {
if ( checkConsistency ) {
string errmsg;
if ( ! checkConfigServersConsistent( errmsg ) ) {
- log( LL_ERROR ) << "config servers not in sync! " << errmsg << warnings;
+ LOG( LL_ERROR ) << "config servers not in sync! " << errmsg << warnings;
return false;
}
}
@@ -1030,7 +1030,7 @@ namespace mongo {
try {
Shard s = Shard::lookupRSName(monitor->getName());
if (s == Shard::EMPTY) {
- log(1) << "replicaSetChange: shard not found for set: " << monitor->getServerAddress() << endl;
+ LOG(1) << "replicaSetChange: shard not found for set: " << monitor->getServerAddress() << endl;
return;
}
scoped_ptr<ScopedDbConnection> conn( ScopedDbConnection::getInternalScopedDbConnection(
diff --git a/src/mongo/s/cursors.cpp b/src/mongo/s/cursors.cpp
index 9d392b0a6cf..34e787cfb80 100644
--- a/src/mongo/s/cursors.cpp
+++ b/src/mongo/s/cursors.cpp
@@ -232,7 +232,7 @@ namespace mongo {
int n = *x++;
if ( n > 2000 ) {
- log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
+ LOG( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
}
@@ -245,7 +245,7 @@ namespace mongo {
LOG(_myLogLevel) << "CursorCache::gotKillCursors id: " << id << endl;
if ( ! id ) {
- log( LL_WARNING ) << " got cursor id of 0 to kill" << endl;
+ LOG( LL_WARNING ) << " got cursor id of 0 to kill" << endl;
continue;
}
@@ -261,7 +261,7 @@ namespace mongo {
MapNormal::iterator j = _refs.find( id );
if ( j == _refs.end() ) {
- log( LL_WARNING ) << "can't find cursor: " << id << endl;
+ LOG( LL_WARNING ) << "can't find cursor: " << id << endl;
continue;
}
server = j->second;
@@ -295,7 +295,7 @@ namespace mongo {
if ( idleFor < TIMEOUT ) {
continue;
}
- log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make log(1)
+ log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make LOG(1)
_cursors.erase( i );
i = _cursors.begin(); // possible 2nd entry will get skipped, will get on next pass
if ( i == _cursors.end() )
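The cursors.cpp hunks above also route named severities through the macro, including a ternary that picks between LL_WARNING and LL_ERROR at runtime. In the sketch below the enum values are an assumption chosen so the example is self-contained and warnings are never suppressed; the real LogLevel enum lives in the logging headers and its values differ.

    #include <iostream>

    static int g_logLevel = 0;
    #define LOG(level) if ((level) > g_logLevel) {} else std::cout

    // Stand-in severities: negative so they sit below every debug level.
    enum LogLevel { LL_ERROR = -2, LL_WARNING = -1 };

    int main() {
        int n = 35000;
        LOG(n < 30000 ? LL_WARNING : LL_ERROR)          // picks LL_ERROR here
            << "receivedKillCursors, n=" << n << '\n';  // printed either way
    }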
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 6b25d257f54..1d8867c311e 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1071,7 +1071,7 @@ namespace mongo {
conn->done();
- log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;
+ LOG(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;
if ( ! ok || res["state"].String() == "fail" ) {
warning() << "moveChunk error transferring data caused migration abort: " << res << migrateLog;
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 6168429b4ef..3e3e3135a26 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -583,7 +583,7 @@ namespace mongo {
result.append( "requestedMin" , min );
result.append( "requestedMax" , max );
- log( LL_WARNING ) << "aborted split because " << errmsg << ": " << min << "->" << max
+ LOG( LL_WARNING ) << "aborted split because " << errmsg << ": " << min << "->" << max
<< " is now " << currMin << "->" << currMax << endl;
return false;
}
@@ -593,7 +593,7 @@ namespace mongo {
result.append( "from" , myShard.getName() );
result.append( "official" , shard );
- log( LL_WARNING ) << "aborted split because " << errmsg << ": chunk is at " << shard
+ LOG( LL_WARNING ) << "aborted split because " << errmsg << ": chunk is at " << shard
<< " and not at " << myShard.getName() << endl;
return false;
}
@@ -603,7 +603,7 @@ namespace mongo {
maxVersion.addToBSON( result, "officialVersion" );
shardingState.getVersion( ns ).addToBSON( result, "myVersion" );
- log( LL_WARNING ) << "aborted split because " << errmsg << ": official " << maxVersion
+ LOG( LL_WARNING ) << "aborted split because " << errmsg << ": official " << maxVersion
<< " mine: " << shardingState.getVersion(ns) << endl;
return false;
}
diff --git a/src/mongo/s/s_only.cpp b/src/mongo/s/s_only.cpp
index ef62b532421..c9e3c9e520f 100644
--- a/src/mongo/s/s_only.cpp
+++ b/src/mongo/s/s_only.cpp
@@ -100,7 +100,7 @@ namespace mongo {
log() << "command denied: " << cmdObj.toString() << endl;
return false;
}
- log( 2 ) << "command: " << cmdObj << endl;
+ LOG( 2 ) << "command: " << cmdObj << endl;
}
if (!client.getAuthenticationInfo()->isAuthorized(dbname)) {
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index a1d0673ca3c..e7a963078c1 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -104,7 +104,7 @@ namespace mongo {
r.process();
}
catch ( AssertionException & e ) {
- log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException while processing op type : " << m.operation() << " to : " << r.getns() << causedBy(e) << endl;
+ LOG( e.isUserAssertion() ? 1 : 0 ) << "AssertionException while processing op type : " << m.operation() << " to : " << r.getns() << causedBy(e) << endl;
le->raiseError( e.getCode() , e.what() );
diff --git a/src/mongo/s/strategy_shard.cpp b/src/mongo/s/strategy_shard.cpp
index 19507f29fb9..b45e520bb36 100644
--- a/src/mongo/s/strategy_shard.cpp
+++ b/src/mongo/s/strategy_shard.cpp
@@ -290,7 +290,7 @@ namespace mongo {
// targeting we've done earlier
//
- log( retries == 0 ) << op << " will be retried b/c sharding config info is stale, "
+ LOG( retries == 0 ) << op << " will be retried b/c sharding config info is stale, "
<< " retries: " << retries
<< " ns: " << ns
<< " data: " << query << endl;
diff --git a/src/mongo/s/strategy_single.cpp b/src/mongo/s/strategy_single.cpp
index 442e1d23691..f642274ccff 100644
--- a/src/mongo/s/strategy_single.cpp
+++ b/src/mongo/s/strategy_single.cpp
@@ -189,7 +189,7 @@ namespace mongo {
b.append( "err" , "can't do unlock through mongos" );
}
else {
- log( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
+ LOG( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
return false;
}
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index 9da44fe9b3e..079a1411c59 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -354,7 +354,7 @@ namespace mongo {
else {
// this means that the Scope was killed from a different thread
// for example a cursor got timed out that has a $where clause
- log(3) << "warning: scopeCache is empty!" << endl;
+ LOG(3) << "warning: scopeCache is empty!" << endl;
delete _real;
_real = 0;
}
diff --git a/src/mongo/scripting/engine_v8.cpp b/src/mongo/scripting/engine_v8.cpp
index 15d0fba2f29..ef019d45a34 100644
--- a/src/mongo/scripting/engine_v8.cpp
+++ b/src/mongo/scripting/engine_v8.cpp
@@ -310,7 +310,7 @@ namespace mongo {
void gcCallback(GCType type, GCCallbackFlags flags) {
HeapStatistics stats;
V8::GetHeapStatistics( &stats );
- log(1) << "V8 GC heap stats - "
+ LOG(1) << "V8 GC heap stats - "
<< " total: " << stats.total_heap_size()
<< " exec: " << stats.total_heap_size_executable()
<< " used: " << stats.used_heap_size()<< " limit: "
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index d92238b60ea..fdffe51957a 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -199,7 +199,7 @@ public:
// skip namespaces with $ in them only if we don't specify a collection to dump
if ( _coll == "" && name.find( ".$" ) != string::npos ) {
- log(1) << "\tskipping collection: " << name << endl;
+ LOG(1) << "\tskipping collection: " << name << endl;
continue;
}
@@ -287,7 +287,7 @@ public:
error() << "offset is 0 for record which should be impossible" << endl;
break;
}
- log(1) << loc << endl;
+ LOG(1) << loc << endl;
Record* rec = loc.rec();
BSONObj obj;
try {
diff --git a/src/mongo/tools/import.cpp b/src/mongo/tools/import.cpp
index faa7ed9ad55..f0dc9c7bfc7 100644
--- a/src/mongo/tools/import.cpp
+++ b/src/mongo/tools/import.cpp
@@ -126,7 +126,7 @@ class Import : public Tool {
uassert(16329, str::stream() << "read error, or input line too long (max length: "
<< BUF_SIZE << ")", !(in->rdstate() & ios_base::failbit));
- log(1) << "got line:" << buf << endl;
+ LOG(1) << "got line:" << buf << endl;
}
uassert( 10263 , "unknown error reading file" ,
(!(in->rdstate() & ios_base::badbit)) &&
@@ -328,7 +328,7 @@ public:
return -1;
}
- log(1) << "ns: " << ns << endl;
+ LOG(1) << "ns: " << ns << endl;
auth();
@@ -390,7 +390,7 @@ public:
}
time_t start = time(0);
- log(1) << "filesize: " << fileSize << endl;
+ LOG(1) << "filesize: " << fileSize << endl;
ProgressMeter pm( fileSize );
int num = 0;
int errors = 0;
diff --git a/src/mongo/tools/restore.cpp b/src/mongo/tools/restore.cpp
index 4ec0ef1c538..7a493ceeddc 100644
--- a/src/mongo/tools/restore.cpp
+++ b/src/mongo/tools/restore.cpp
@@ -171,7 +171,7 @@ public:
}
void drillDown( boost::filesystem::path root, bool use_db, bool use_coll, bool top_level=false ) {
- log(2) << "drillDown: " << root.string() << endl;
+ LOG(2) << "drillDown: " << root.string() << endl;
// skip hidden files and directories
if (root.leaf()[0] == '.' && root.leaf() != ".")
@@ -474,7 +474,7 @@ private:
}
}
BSONObj o = bo.obj();
- log(0) << "\tCreating index: " << o << endl;
+ LOG(0) << "\tCreating index: " << o << endl;
conn().insert( _curdb + ".system.indexes" , o );
// We're stricter about errors for indexes than for regular data
diff --git a/src/mongo/tools/tool.cpp b/src/mongo/tools/tool.cpp
index 9e0033f7037..ea90278e952 100644
--- a/src/mongo/tools/tool.cpp
+++ b/src/mongo/tools/tool.cpp
@@ -480,7 +480,7 @@ namespace mongo {
posix_fadvise(fileno(file), 0, fileLength, POSIX_FADV_SEQUENTIAL);
#endif
- log(1) << "\t file size: " << fileLength << endl;
+ LOG(1) << "\t file size: " << fileLength << endl;
unsigned long long read = 0;
unsigned long long num = 0;
diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp
index 44aaaabd5e6..d1e10333372 100644
--- a/src/mongo/unittest/unittest.cpp
+++ b/src/mongo/unittest/unittest.cpp
@@ -113,9 +113,9 @@ namespace mongo {
Result * Suite::run( const std::string& filter, int runsPerTest ) {
- log(1) << "\t about to setupTests" << std::endl;
+ LOG(1) << "\t about to setupTests" << std::endl;
setupTests();
- log(1) << "\t done setupTests" << std::endl;
+ LOG(1) << "\t done setupTests" << std::endl;
Result * r = new Result( _name );
Result::cur = r;
@@ -123,7 +123,7 @@ namespace mongo {
for ( std::vector<TestHolder*>::iterator i=_tests.begin(); i!=_tests.end(); i++ ) {
TestHolder* tc = *i;
if ( filter.size() && tc->getName().find( filter ) == std::string::npos ) {
- log(1) << "\t skipping test: " << tc->getName() << " because doesn't match filter" << std::endl;
+ LOG(1) << "\t skipping test: " << tc->getName() << " because doesn't match filter" << std::endl;
continue;
}
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index f1c89633268..4fca89b5310 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -64,10 +64,10 @@ namespace mongo {
run();
}
catch ( std::exception& e ) {
- log( LL_ERROR ) << "backgroundjob " << name() << "error: " << e.what() << endl;
+ LOG( LL_ERROR ) << "backgroundjob " << name() << "error: " << e.what() << endl;
}
catch(...) {
- log( LL_ERROR ) << "uncaught exception in BackgroundJob " << name() << endl;
+ LOG( LL_ERROR ) << "uncaught exception in BackgroundJob " << name() << endl;
}
{
diff --git a/src/mongo/util/file_allocator.cpp b/src/mongo/util/file_allocator.cpp
index 905029736bf..07a962b42a2 100644
--- a/src/mongo/util/file_allocator.cpp
+++ b/src/mongo/util/file_allocator.cpp
@@ -155,7 +155,7 @@ namespace mongo {
void FileAllocator::ensureLength(int fd , long size) {
#if !defined(_WIN32)
if (useSparseFiles(fd)) {
- log(1) << "using ftruncate to create a sparse file" << endl;
+ LOG(1) << "using ftruncate to create a sparse file" << endl;
int ret = ftruncate(fd, size);
uassert(16063, "ftruncate failed: " + errnoWithDescription(), ret == 0);
return;
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index 0d77a744bfa..f22d7f0ff03 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -1,4 +1,4 @@
-// listen.h
+// listen.cpp
/* Copyright 2009 10gen Inc.
*
@@ -368,7 +368,7 @@ namespace mongo {
int max = (int)(limit.rlim_cur * .8);
- log(1) << "fd limit"
+ LOG(1) << "fd limit"
<< " hard:" << limit.rlim_max
<< " soft:" << limit.rlim_cur
<< " max conn: " << max
@@ -388,7 +388,7 @@ namespace mongo {
if ( current < want ) {
// they want fewer than they can handle
// which is fine
- log(1) << " only allowing " << current << " connections" << endl;
+ LOG(1) << " only allowing " << current << " connections" << endl;
return;
}
if ( current > want ) {
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
index bd6d3ee0b71..466987917eb 100644
--- a/src/mongo/util/net/message_port.cpp
+++ b/src/mongo/util/net/message_port.cpp
@@ -171,7 +171,7 @@ again:
if ( len == 542393671 ) {
// an http GET
- log( psock->getLogLevel() ) << "looks like you're trying to access db over http on native driver port. please add 1000 for webserver" << endl;
+ LOG( psock->getLogLevel() ) << "looks like you're trying to access db over http on native driver port. please add 1000 for webserver" << endl;
string msg = "You are trying to access MongoDB on the native driver port. For http diagnostic access, add 1000 to the port number\n";
stringstream ss;
ss << "HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: text/plain\r\nContent-Length: " << msg.size() << "\r\n\r\n" << msg;
@@ -179,7 +179,7 @@ again:
send( s.c_str(), s.size(), "http" );
return false;
}
- log(0) << "recv(): message len " << len << " is too large" << len << endl;
+ LOG(0) << "recv(): message len " << len << " is too large" << len << endl;
return false;
}
@@ -201,7 +201,7 @@ again:
}
catch ( const SocketException & e ) {
- log(psock->getLogLevel() + (e.shouldPrint() ? 0 : 1) ) << "SocketException: remote: " << remote() << " error: " << e << endl;
+ LOG(psock->getLogLevel() + (e.shouldPrint() ? 0 : 1) ) << "SocketException: remote: " << remote() << " error: " << e << endl;
m.reset();
return false;
}
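message_port.cpp derives the level from per-connection state plus exception detail: a SocketException that should not be printed is demoted by one level. A sketch with invented stand-ins (Sock and shouldPrint are placeholders for psock->getLogLevel() and e.shouldPrint()):

    #include <iostream>

    static int g_logLevel = 0;
    #define LOG(level) if ((level) > g_logLevel) {} else std::cout

    struct Sock { int logLevel; };   // stand-in for psock->getLogLevel()

    int main() {
        Sock psock{0};
        bool shouldPrint = true;
        LOG(psock.logLevel + (shouldPrint ? 0 : 1))
            << "SocketException: remote error, printing\n";  // level 0: printed
        shouldPrint = false;
        LOG(psock.logLevel + (shouldPrint ? 0 : 1))
            << "SocketException: quiet path\n";              // level 1: suppressed
    }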
diff --git a/src/mongo/util/net/miniwebserver.cpp b/src/mongo/util/net/miniwebserver.cpp
index c0d1bf24fc4..4a2ec0ab43b 100644
--- a/src/mongo/util/net/miniwebserver.cpp
+++ b/src/mongo/util/net/miniwebserver.cpp
@@ -171,7 +171,7 @@ namespace mongo {
psock->close();
}
catch ( SocketException& e ) {
- log(1) << "couldn't send data to http client: " << e << endl;
+ LOG(1) << "couldn't send data to http client: " << e << endl;
}
}
diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp
index 25a97e8029e..802a9f61854 100644
--- a/src/mongo/util/net/sock.cpp
+++ b/src/mongo/util/net/sock.cpp
@@ -574,7 +574,7 @@ namespace mongo {
_fd = socket(remote.getType(), SOCK_STREAM, 0);
if ( _fd == INVALID_SOCKET ) {
- log(_logLevel) << "ERROR: connect invalid socket " << errnoWithDescription() << endl;
+ LOG(_logLevel) << "ERROR: connect invalid socket " << errnoWithDescription() << endl;
return false;
}
@@ -639,12 +639,12 @@ namespace mongo {
const int mongo_errno = errno;
if ( ( mongo_errno == EAGAIN || mongo_errno == EWOULDBLOCK ) && _timeout != 0 ) {
#endif
- log(_logLevel) << "Socket " << context << " send() timed out " << _remote.toString() << endl;
+ LOG(_logLevel) << "Socket " << context << " send() timed out " << _remote.toString() << endl;
throw SocketException( SocketException::SEND_TIMEOUT , remoteString() );
}
else {
SocketException::Type t = SocketException::SEND_ERROR;
- log(_logLevel) << "Socket " << context << " send() "
+ LOG(_logLevel) << "Socket " << context << " send() "
<< errnoWithDescription(mongo_errno) << ' ' << remoteString() << endl;
throw SocketException( t , remoteString() );
}
@@ -702,11 +702,11 @@ namespace mongo {
int ret = ::sendmsg( _fd , &meta , portSendFlags );
if ( ret == -1 ) {
if ( errno != EAGAIN || _timeout == 0 ) {
- log(_logLevel) << "Socket " << context << " send() " << errnoWithDescription() << ' ' << remoteString() << endl;
+ LOG(_logLevel) << "Socket " << context << " send() " << errnoWithDescription() << ' ' << remoteString() << endl;
throw SocketException( SocketException::SEND_ERROR , remoteString() );
}
else {
- log(_logLevel) << "Socket " << context << " send() remote timeout " << remoteString() << endl;
+ LOG(_logLevel) << "Socket " << context << " send() remote timeout " << remoteString() << endl;
throw SocketException( SocketException::SEND_TIMEOUT , remoteString() );
}
}
@@ -735,13 +735,13 @@ namespace mongo {
int ret = unsafe_recv( buf , len );
if ( ret > 0 ) {
if ( len <= 4 && ret != len )
- log(_logLevel) << "Socket recv() got " << ret << " bytes wanted len=" << len << endl;
+ LOG(_logLevel) << "Socket recv() got " << ret << " bytes wanted len=" << len << endl;
verify( ret <= len );
len -= ret;
buf += ret;
}
else if ( ret == 0 ) {
- log(3) << "Socket recv() conn closed? " << remoteString() << endl;
+ LOG(3) << "Socket recv() conn closed? " << remoteString() << endl;
throw SocketException( SocketException::CLOSED , remoteString() );
}
else { /* ret < 0 */
@@ -763,11 +763,11 @@ namespace mongo {
) && _timeout > 0 )
{
// this is a timeout
- log(_logLevel) << "Socket recv() timeout " << remoteString() <<endl;
+ LOG(_logLevel) << "Socket recv() timeout " << remoteString() <<endl;
throw SocketException( SocketException::RECV_TIMEOUT, remoteString() );
}
- log(_logLevel) << "Socket recv() " << errnoWithDescription(e) << " " << remoteString() <<endl;
+ LOG(_logLevel) << "Socket recv() " << errnoWithDescription(e) << " " << remoteString() <<endl;
throw SocketException( SocketException::RECV_ERROR , remoteString() );
}
}
diff --git a/src/mongo/util/paths.h b/src/mongo/util/paths.h
index 5ba3c64d8ae..6bfb39b4bc5 100644
--- a/src/mongo/util/paths.h
+++ b/src/mongo/util/paths.h
@@ -110,7 +110,7 @@ namespace mongo {
boost::filesystem::path dir = file.branch_path(); // parent_path in new boosts
- log(1) << "flushing directory " << dir.string() << endl;
+ LOG(1) << "flushing directory " << dir.string() << endl;
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(13650, str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: " << errnoWithDescription(), fd >= 0);
diff --git a/src/mongo/util/version.cpp b/src/mongo/util/version.cpp
index 6df86e85c14..99f9dac5f71 100644
--- a/src/mongo/util/version.cpp
+++ b/src/mongo/util/version.cpp
@@ -306,7 +306,7 @@ namespace mongo {
verify( versionCmp("1.2.3-", "1.2.3") < 0 );
verify( versionCmp("1.2.3-pre", "1.2.3") < 0 );
- log(1) << "versionCmpTest passed" << endl;
+ LOG(1) << "versionCmpTest passed" << endl;
}
} versionCmpTest;
@@ -330,7 +330,7 @@ namespace mongo {
verify( _versionArray("1.2.0-rc4-pre-") == BSON_ARRAY(1 << 2 << 0 << -6) );
verify( _versionArray("2.0.0-rc5-pre-") == BSON_ARRAY(2 << 0 << 0 << -5) );
- log(1) << "versionArrayTest passed" << endl;
+ LOG(1) << "versionArrayTest passed" << endl;
}
} versionArrayTest;
}