author     Andy Schwerin <schwerin@10gen.com>   2013-07-01 14:35:03 -0400
committer  Andy Schwerin <schwerin@10gen.com>   2013-07-09 16:43:33 -0400
commit     ea22c3173541606519ddcb6da578b837a092b1c1
tree       d3736e74ee67126f00466eec1686578ba8797749  /src/mongo/db
parent     4bab9b0ee8dbffa00c4c86ea20195524a90f81f4
SERVER-10084 New logging implementation.
This change-set:

* Introduces a new top-level directory, mongo/logger, containing most of the
  implementation of logging functionality formerly found in log.cpp/log.h.
* Cleans up existing, unusual uses of the logging system that were not trivially
  compatible with the new implementation.
* Replaces Logstream/Nulstream with a LogstreamBuilder object, whose destructor
  writes log messages. This new LogstreamBuilder is reentrant, unlike the old
  logging code, which was thread-safe but not reentrant. Additionally, std::endl
  is no longer required to terminate a log line. When a LogstreamBuilder goes
  out of scope, the log message gets committed.
* Separates the log system into several components: a global LogManager, several
  LogDomains, various kinds of Appenders (e.g., SyslogAppender) and Encoders
  (for formatting messages).
* Allows unit tests to capture and examine log output.

This patch does _not_ introduce support for hierarchical log domains, or for
enabling and disabling specific log domains when the server is running in a
multi-threaded mode. This is future work.
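For orientation, here is a minimal sketch of the usage pattern this change-set
enables, built only from names that appear in the patch; the enclosing function,
the choice of headers, and the message text are illustrative assumptions, not
part of the commit itself.

    #include "mongo/logger/logger.h"  // assumed home of logger::globalLogDomain()
    #include "mongo/util/log.h"       // assumed home of log(), LOG(), warning()

    namespace mongo {

        void exampleStartupLogging() {  // hypothetical function for illustration
            // log() now returns a LogstreamBuilder; the message is committed when
            // the builder goes out of scope, so std::endl is no longer required.
            log() << "MongoDB starting";

            // Severity thresholds replace the old global logLevel variable.
            logger::globalLogDomain()->setMinimumLoggedSeverity(
                    logger::LogSeverity::Debug(1));

            // Gated debug logging, as done throughout the diff below.
            if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
                LOG(1) << "verbose diagnostics enabled";
            }
        }

    }  // namespace mongo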
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/btree.cpp | 17
-rw-r--r--  src/mongo/db/btreebuilder.cpp | 3
-rw-r--r--  src/mongo/db/client.cpp | 1
-rw-r--r--  src/mongo/db/cmdline.cpp | 5
-rw-r--r--  src/mongo/db/commands/index_stats.cpp | 2
-rw-r--r--  src/mongo/db/commands/isself.cpp | 22
-rw-r--r--  src/mongo/db/commands/mr.cpp | 2
-rw-r--r--  src/mongo/db/commands/parameters.cpp | 41
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 4
-rw-r--r--  src/mongo/db/commands/storage_details.cpp | 2
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp | 5
-rw-r--r--  src/mongo/db/database.cpp | 4
-rw-r--r--  src/mongo/db/db.cpp | 18
-rw-r--r--  src/mongo/db/dbcommands.cpp | 23
-rw-r--r--  src/mongo/db/dbcommands_admin.cpp | 5
-rw-r--r--  src/mongo/db/dbwebserver.cpp | 11
-rw-r--r--  src/mongo/db/index.cpp | 2
-rw-r--r--  src/mongo/db/index/btree_based_builder.cpp | 13
-rw-r--r--  src/mongo/db/index_update.cpp | 4
-rw-r--r--  src/mongo/db/initialize_server_global_state.cpp | 87
-rw-r--r--  src/mongo/db/instance.cpp | 14
-rw-r--r--  src/mongo/db/jsobj.cpp | 9
-rw-r--r--  src/mongo/db/ops/query.cpp | 10
-rw-r--r--  src/mongo/db/pagefault.cpp | 4
-rw-r--r--  src/mongo/db/pdfile.cpp | 14
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp | 6
-rw-r--r--  src/mongo/db/query_optimizer_internal.cpp | 3
-rw-r--r--  src/mongo/db/record.cpp | 2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 27
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 4
-rw-r--r--  src/mongo/db/repl/rs.cpp | 4
-rw-r--r--  src/mongo/db/server_extra_log_context.cpp | 2
-rw-r--r--  src/mongo/db/storage/namespace_index.cpp | 2
33 files changed, 243 insertions, 129 deletions
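A recurring pattern in the hunks below is replacing direct reads of the old
global logLevel with a severity check against the global log domain. The
following hedged sketch mirrors the flush-timing hunk in db.cpp (previously
"if (logLevel >= 1 || time_flushing >= 10000)"); the helper name, its
parameters, and the headers are illustrative assumptions.

    #include "mongo/logger/logger.h"  // assumed home of logger::globalLogDomain()
    #include "mongo/util/log.h"       // assumed home of log()

    namespace mongo {

        // Hypothetical helper showing the new gating style used throughout the diff.
        void logFlushTime(int timeFlushingMillis, int numFiles) {
            if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))
                || timeFlushingMillis >= 10000) {
                log() << "flushing mmaps took " << timeFlushingMillis << "ms for "
                      << numFiles << " files";
            }
        }

    }  // namespace mongo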
diff --git a/src/mongo/db/btree.cpp b/src/mongo/db/btree.cpp
index 0a5e8d905d4..c74d71a6884 100644
--- a/src/mongo/db/btree.cpp
+++ b/src/mongo/db/btree.cpp
@@ -146,7 +146,7 @@ namespace mongo {
this->assertValid(order, true);
if ( bt_dmp ) {
- _log() << thisLoc.toString() << ' ';
+ log() << thisLoc.toString() << ' ';
((BtreeBucket *) this)->dump(depth);
}
@@ -1761,18 +1761,19 @@ namespace mongo {
template< class V >
void BtreeBucket<V>::dump(unsigned depth) const {
string indent = string(depth, ' ');
- _log() << "BUCKET n:" << this->n;
- _log() << " parent:" << hex << this->parent.getOfs() << dec;
+ LogstreamBuilder l = log();
+ l << "BUCKET n:" << this->n;
+ l << " parent:" << hex << this->parent.getOfs() << dec;
for ( int i = 0; i < this->n; i++ ) {
- _log() << '\n' << indent;
+ l << '\n' << indent;
KeyNode k = keyNode(i);
string ks = k.key.toString();
- _log() << " " << hex << k.prevChildBucket.getOfs() << '\n';
- _log() << indent << " " << i << ' ' << ks.substr(0, 30) << " Loc:" << k.recordLoc.toString() << dec;
+ l << " " << hex << k.prevChildBucket.getOfs() << '\n';
+ l << indent << " " << i << ' ' << ks.substr(0, 30) << " Loc:" << k.recordLoc.toString() << dec;
if ( this->k(i).isUnused() )
- _log() << " UNUSED";
+ l << " UNUSED";
}
- _log() << "\n" << indent << " " << hex << this->nextChild.getOfs() << dec << endl;
+ l << "\n" << indent << " " << hex << this->nextChild.getOfs() << dec << endl;
}
/** todo: meaning of return code unclear clean up */
diff --git a/src/mongo/db/btreebuilder.cpp b/src/mongo/db/btreebuilder.cpp
index 26c8e27c4b4..a97882b510a 100644
--- a/src/mongo/db/btreebuilder.cpp
+++ b/src/mongo/db/btreebuilder.cpp
@@ -151,8 +151,9 @@ namespace mongo {
mayCommitProgressDurably();
}
- if( levels > 1 )
+ if( levels > 1 ) {
LOG(2) << "btree levels: " << levels << endl;
+ }
}
/** when all addKeys are done, we then build the higher levels of the tree */
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 6e098264367..974d072fe93 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -48,6 +48,7 @@
#include "mongo/s/d_logic.h"
#include "mongo/s/stale_exception.h" // for SendStaleConfigException
#include "mongo/scripting/engine.h"
+#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/file_allocator.h"
#include "mongo/util/mongoutils/checksum.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/cmdline.cpp b/src/mongo/db/cmdline.cpp
index f1c6d8661de..5b271439f15 100644
--- a/src/mongo/db/cmdline.cpp
+++ b/src/mongo/db/cmdline.cpp
@@ -296,12 +296,13 @@ namespace {
}
if (params.count("verbose")) {
- logLevel = 1;
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(1));
}
for (string s = "vv"; s.length() <= 12; s.append("v")) {
if (params.count(s)) {
- logLevel = s.length();
+ logger::globalLogDomain()->setMinimumLoggedSeverity(
+ logger::LogSeverity::Debug(s.length()));
}
}
diff --git a/src/mongo/db/commands/index_stats.cpp b/src/mongo/db/commands/index_stats.cpp
index 1309e078ee9..6a5a6f0cf6a 100644
--- a/src/mongo/db/commands/index_stats.cpp
+++ b/src/mongo/db/commands/index_stats.cpp
@@ -501,7 +501,7 @@ namespace mongo {
string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
const NamespaceDetails* nsd = nsdetails(ns);
if (!cmdLine.quiet) {
- tlog() << "CMD: indexStats " << ns << endl;
+ MONGO_TLOG(0) << "CMD: indexStats " << ns << endl;
}
if (!nsd) {
errmsg = "ns not found";
diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp
index 055f966c3fe..b6f7e8d6d85 100644
--- a/src/mongo/db/commands/isself.cpp
+++ b/src/mongo/db/commands/isself.cpp
@@ -97,12 +97,15 @@ namespace mongo {
freeifaddrs( addrs );
addrs = NULL;
- if (logLevel >= 1) {
- LOG(1) << "getMyAddrs():";
+ if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
+ LogstreamBuilder builder(logger::globalLogDomain(),
+ getThreadName(),
+ logger::LogSeverity::Debug(1));
+ builder << "getMyAddrs():";
for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
- LOG(1) << " [" << *it << ']';
+ builder << " [" << *it << ']';
}
- LOG(1) << endl;
+ builder << endl;
}
return out;
@@ -141,12 +144,15 @@ namespace mongo {
freeaddrinfo(addrs);
- if (logLevel >= 1) {
- LOG(1) << "getallIPs(\"" << iporhost << "\"):";
+ if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
+ LogstreamBuilder builder(logger::globalLogDomain(),
+ getThreadName(),
+ logger::LogSeverity::Debug(1));
+ builder << "getallIPs(\"" << iporhost << "\"):";
for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
- LOG(1) << " [" << *it << ']';
+ builder << " [" << *it << ']';
}
- LOG(1) << endl;
+ builder << endl;
}
return out;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index be11a40b995..05cbdb1dd31 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -910,7 +910,7 @@ namespace mongo {
{
dbtempreleasecond tl;
if ( ! tl.unlocked() )
- LOG( LL_WARNING ) << "map/reduce can't temp release" << endl;
+ warning() << "map/reduce can't temp release" << endl;
// reduce and finalize last array
finalReduce( all );
}
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index b23f6c49665..ddb6bb4a217 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -23,6 +23,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/cmdline.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
@@ -162,11 +163,41 @@ namespace mongo {
} cmdSet;
namespace {
- ExportedServerParameter<int> LogLevelSetting( ServerParameterSet::getGlobal(),
- "logLevel",
- &logLevel,
- true,
- true );
+ class LogLevelSetting : public ServerParameter {
+ public:
+ LogLevelSetting() : ServerParameter(ServerParameterSet::getGlobal(), "logLevel") {}
+
+ virtual void append(BSONObjBuilder& b, const std::string& name) {
+ b << name << logger::globalLogDomain()->getMinimumLogSeverity().toInt();
+ }
+
+ virtual Status set(const BSONElement& newValueElement) {
+ typedef logger::LogSeverity LogSeverity;
+ int newValue;
+ if (!newValueElement.coerce(&newValue) || newValue < 0)
+ return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
+ "Invalid value for logLevel: " << newValueElement);
+ LogSeverity newSeverity = (newValue > 0) ? LogSeverity::Debug(newValue) :
+ LogSeverity::Log();
+ logger::globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
+ return Status::OK();
+ }
+
+ virtual Status setFromString(const std::string& str) {
+ typedef logger::LogSeverity LogSeverity;
+ int newValue;
+ Status status = parseNumberFromString(str, &newValue);
+ if (!status.isOK())
+ return status;
+ if (newValue < 0)
+ return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
+ "Invalid value for logLevel: " << newValue);
+ LogSeverity newSeverity = (newValue > 0) ? LogSeverity::Debug(newValue) :
+ LogSeverity::Log();
+ logger::globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
+ return Status::OK();
+ }
+ } logLevelSetting;
ExportedServerParameter<bool> NoTableScanSetting( ServerParameterSet::getGlobal(),
"notablescan",
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index d5905325bd2..56df3369d24 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -127,7 +127,7 @@ namespace mongo {
BSONObj shardBson(shardBuilder.done());
DEV (log() << "\n---- shardBson\n" <<
- shardBson.jsonString(Strict, 1) << "\n----\n").flush();
+ shardBson.jsonString(Strict, 1) << "\n----\n");
/* for debugging purposes, show what the pipeline now looks like */
DEV {
@@ -135,7 +135,7 @@ namespace mongo {
pPipeline->toBson(&pipelineBuilder);
BSONObj pipelineBson(pipelineBuilder.done());
(log() << "\n---- pipelineBson\n" <<
- pipelineBson.jsonString(Strict, 1) << "\n----\n").flush();
+ pipelineBson.jsonString(Strict, 1) << "\n----\n");
}
/* on the shard servers, create the local pipeline */
diff --git a/src/mongo/db/commands/storage_details.cpp b/src/mongo/db/commands/storage_details.cpp
index b86ec1dff5e..4ea51f0ebd9 100644
--- a/src/mongo/db/commands/storage_details.cpp
+++ b/src/mongo/db/commands/storage_details.cpp
@@ -777,7 +777,7 @@ namespace {
const string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
const NamespaceDetails* nsd = nsdetails(ns);
if (!cmdLine.quiet) {
- tlog() << "CMD: storageDetails " << ns << ", analyze " << subCommandStr << endl;
+ MONGO_TLOG(0) << "CMD: storageDetails " << ns << ", analyze " << subCommandStr << endl;
}
if (!nsd) {
errmsg = "ns not found";
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 23aca5cc1c6..89096429f91 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -157,9 +157,10 @@ namespace mongo {
opDebug.recordStats();
// Log operation if running with at least "-v", or if exceeds slow threshold.
- if (logLevel >= 1
+ if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))
|| opDebug.executionTime > cmdLine.slowMS + childOp.getExpectedLatencyMs()) {
- tlog() << opDebug.report(childOp) << endl;
+
+ MONGO_TLOG(1) << opDebug.report(childOp) << endl;
}
// TODO Log operation if logLevel >= 3 and assertion thrown (as assembleResponse()
diff --git a/src/mongo/db/database.cpp b/src/mongo/db/database.cpp
index 1d151241830..2f69f217e24 100644
--- a/src/mongo/db/database.cpp
+++ b/src/mongo/db/database.cpp
@@ -194,7 +194,7 @@ namespace mongo {
{
if( n < (int) _files.size() && _files[n] ) {
- dlog(2) << "openExistingFile " << n << " is already open" << endl;
+ MONGO_DLOG(2) << "openExistingFile " << n << " is already open" << endl;
return true;
}
}
@@ -474,7 +474,7 @@ namespace mongo {
// that would make the DBs map very large. not clear what to do to handle though,
// perhaps just log it, which is what we do here with the "> 40" :
bool cant = !Lock::isWriteLocked(ns);
- if( logLevel >= 1 || m.size() > 40 || cant || DEBUG_BUILD ) {
+ if( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1)) || m.size() > 40 || cant || DEBUG_BUILD ) {
log() << "opening db: " << (path==dbpath?"":path) << ' ' << dbname << endl;
}
massert(15927, "can't open database in a read lock. if db was just closed, consider retrying the query. might otherwise indicate an internal error", !cant);
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 8ac02271f06..51a1c61fb4b 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -60,6 +60,7 @@
#include "mongo/scripting/engine.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/task.h"
+#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/exception_filter_win32.h"
#include "mongo/util/file_allocator.h"
#include "mongo/util/net/message_server.h"
@@ -446,12 +447,15 @@ namespace mongo {
void run() {
Client::initThread( name().c_str() );
- if( cmdLine.syncdelay == 0 )
+ if( cmdLine.syncdelay == 0 ) {
log() << "warning: --syncdelay 0 is not recommended and can have strange performance" << endl;
- else if( cmdLine.syncdelay == 1 )
+ }
+ else if( cmdLine.syncdelay == 1 ) {
log() << "--syncdelay 1" << endl;
- else if( cmdLine.syncdelay != 60 )
+ }
+ else if( cmdLine.syncdelay != 60 ) {
LOG(1) << "--syncdelay " << cmdLine.syncdelay << endl;
+ }
int time_flushing = 0;
while ( ! inShutdown() ) {
_diaglog.flush();
@@ -474,7 +478,7 @@ namespace mongo {
_flushed(time_flushing);
- if( logLevel >= 1 || time_flushing >= 10000 ) {
+ if( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1)) || time_flushing >= 10000 ) {
log() << "flushing mmaps took " << time_flushing << "ms " << " for " << numFiles << " files" << endl;
}
}
@@ -577,13 +581,15 @@ namespace mongo {
Client::initThread("initandlisten");
- Logstream::get().addGlobalTee( new RamLog("global") );
+ logger::globalLogDomain()->attachAppender(
+ logger::MessageLogDomain::AppenderAutoPtr(
+ new RamLogAppender(new RamLog("global"))));
bool is32bit = sizeof(int*) == 4;
{
ProcessId pid = ProcessId::getCurrent();
- Nullstream& l = log();
+ LogstreamBuilder l = log();
l << "MongoDB starting : pid=" << pid << " port=" << cmdLine.port << " dbpath=" << dbpath;
if( replSettings.master ) l << " master=" << replSettings.master;
if( replSettings.slave ) l << " slave=" << (int) replSettings.slave;
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 8ab4b4a0d14..3a2a9036f62 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -599,8 +599,9 @@ namespace mongo {
bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
_diaglog.flush();
- if ( !cmdLine.quiet )
- tlog() << "CMD: diagLogging set to " << _diaglog.getLevel() << " from: " << was << endl;
+ if ( !cmdLine.quiet ) {
+ MONGO_TLOG(0) << "CMD: diagLogging set to " << _diaglog.getLevel() << " from: " << was << endl;
+ }
result.append( "was" , was );
return true;
}
@@ -643,8 +644,9 @@ namespace mongo {
virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string nsToDrop = dbname + '.' + cmdObj.firstElement().valuestr();
NamespaceDetails *d = nsdetails(nsToDrop);
- if ( !cmdLine.quiet )
- tlog() << "CMD: drop " << nsToDrop << endl;
+ if ( !cmdLine.quiet ) {
+ MONGO_TLOG(0) << "CMD: drop " << nsToDrop << endl;
+ }
if ( d == 0 ) {
errmsg = "ns not found";
return false;
@@ -817,8 +819,9 @@ namespace mongo {
BSONElement e = jsobj.firstElement();
string toDeleteNs = dbname + '.' + e.valuestr();
NamespaceDetails *d = nsdetails(toDeleteNs);
- if ( !cmdLine.quiet )
- tlog() << "CMD: dropIndexes " << toDeleteNs << endl;
+ if ( !cmdLine.quiet ) {
+ MONGO_TLOG(0) << "CMD: dropIndexes " << toDeleteNs << endl;
+ }
if ( d ) {
stopIndexBuilds(dbname, jsobj);
@@ -883,7 +886,7 @@ namespace mongo {
BSONElement e = jsobj.firstElement();
string toDeleteNs = dbname + '.' + e.valuestr();
NamespaceDetails *d = nsdetails(toDeleteNs);
- tlog() << "CMD: reIndex " << toDeleteNs << endl;
+ MONGO_TLOG(0) << "CMD: reIndex " << toDeleteNs << endl;
BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs.c_str());
if ( ! d ) {
@@ -2165,8 +2168,9 @@ namespace mongo {
return;
}
- if ( c->adminOnly() )
+ if ( c->adminOnly() ) {
LOG( 2 ) << "command: " << cmdObj << endl;
+ }
if (c->maintenanceMode() && theReplSet) {
mmSetter.reset(new MaintenanceModeSetter());
@@ -2241,8 +2245,7 @@ namespace mongo {
bool _runCommands(const char *ns, BSONObj& _cmdobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions) {
string dbname = nsToDatabase( ns );
- if( logLevel >= 1 )
- log() << "run command " << ns << ' ' << _cmdobj << endl;
+ LOG(1) << "run command " << ns << ' ' << _cmdobj << endl;
const char *p = strchr(ns, '.');
if ( !p ) return false;
diff --git a/src/mongo/db/dbcommands_admin.cpp b/src/mongo/db/dbcommands_admin.cpp
index d2d68df8011..48bf66cd477 100644
--- a/src/mongo/db/dbcommands_admin.cpp
+++ b/src/mongo/db/dbcommands_admin.cpp
@@ -162,8 +162,9 @@ namespace mongo {
bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
NamespaceDetails * d = nsdetails( ns );
- if ( !cmdLine.quiet )
- tlog() << "CMD: validate " << ns << endl;
+ if ( !cmdLine.quiet ) {
+ MONGO_TLOG(0) << "CMD: validate " << ns << endl;
+ }
if ( ! d ) {
errmsg = "ns not found";
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index bab3fa99f3f..54d4c323a42 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -26,6 +26,7 @@
#include <boost/date_time/posix_time/posix_time.hpp>
#include <pcrecpp.h>
+#include "mongo/base/init.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_session.h"
@@ -334,7 +335,8 @@ namespace mongo {
_log = RamLog::get( "global" );
if ( ! _log ) {
_log = new RamLog("global");
- Logstream::get().addGlobalTee( _log );
+ logger::globalLogDomain()->attachAppender(logger::MessageLogDomain::AppenderAutoPtr(
+ new RamLogAppender(_log)));
}
}
@@ -344,7 +346,12 @@ namespace mongo {
RamLog * _log;
};
- LogPlugin * logPlugin = new LogPlugin();
+ MONGO_INITIALIZER_GENERAL(WebStatusLogPlugin, ("ServerLogRedirection"), ("default"))(
+ InitializerContext*) {
+
+ new LogPlugin;
+ return Status::OK();
+ }
// -- handler framework ---
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index 8d3a46c0f2b..9f6da5803be 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -243,7 +243,7 @@ namespace mongo {
return false;
}
sourceCollection = nsdetails(sourceNS);
- tlog() << "info: creating collection " << sourceNS << " on add index" << endl;
+ MONGO_TLOG(0) << "info: creating collection " << sourceNS << " on add index" << endl;
verify( sourceCollection );
}
diff --git a/src/mongo/db/index/btree_based_builder.cpp b/src/mongo/db/index/btree_based_builder.cpp
index 42618de14e9..735cb292b9b 100644
--- a/src/mongo/db/index/btree_based_builder.cpp
+++ b/src/mongo/db/index/btree_based_builder.cpp
@@ -162,7 +162,7 @@ namespace mongo {
phaseOne->addKeys(keys, loc, mayInterrupt);
cursor->advance();
progressMeter->hit();
- if ( logLevel > 1 && phaseOne->n % 10000 == 0 ) {
+ if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) && phaseOne->n % 10000 == 0 ) {
printMemInfo( "\t iterating objects" );
}
}
@@ -175,7 +175,7 @@ namespace mongo {
Timer t;
- tlog(1) << "fastBuildIndex " << ns << ' ' << idx.info.obj().toString() << endl;
+ MONGO_TLOG(1) << "fastBuildIndex " << ns << ' ' << idx.info.obj().toString() << endl;
bool dupsAllowed = !idx.unique() || ignoreUniqueIndex(idx);
bool dropDups = idx.dropDups() || inDBRepair;
@@ -183,7 +183,8 @@ namespace mongo {
getDur().writingDiskLoc(idx.head).Null();
- if ( logLevel > 1 ) printMemInfo( "before index start" );
+ if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
+ printMemInfo( "before index start" );
/* get and sort all the keys ----- */
ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort",
@@ -201,9 +202,11 @@ namespace mongo {
d->setIndexIsMultikey(ns, idxNo);
}
- if ( logLevel > 1 ) printMemInfo( "before final sort" );
+ if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
+ printMemInfo( "before final sort" );
phase1.sorter->sort( mayInterrupt );
- if ( logLevel > 1 ) printMemInfo( "after final sort" );
+ if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
+ printMemInfo( "after final sort" );
LOG(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles()
<< " files " << " in " << t.seconds() << " secs" << endl;
diff --git a/src/mongo/db/index_update.cpp b/src/mongo/db/index_update.cpp
index daaa955f84f..390f1794de5 100644
--- a/src/mongo/db/index_update.cpp
+++ b/src/mongo/db/index_update.cpp
@@ -269,7 +269,7 @@ namespace mongo {
bool background = idx.info.obj()["background"].trueValue();
- tlog() << "build index " << ns << ' ' << idx.keyPattern() << ( background ? " background" : "" ) << endl;
+ MONGO_TLOG(0) << "build index " << ns << ' ' << idx.keyPattern() << ( background ? " background" : "" ) << endl;
Timer t;
unsigned long long n;
@@ -284,7 +284,7 @@ namespace mongo {
BackgroundIndexBuildJob j(ns.c_str());
n = j.go(ns, d, idx);
}
- tlog() << "build index done. scanned " << n << " total records. " << t.millis() / 1000.0 << " secs" << endl;
+ MONGO_TLOG(0) << "build index done. scanned " << n << " total records. " << t.millis() / 1000.0 << " secs" << endl;
}
extern BSONObj id_obj; // { _id : 1 }
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index 9d20834d5b2..125ab6fccf1 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -19,15 +19,25 @@
#include "mongo/db/initialize_server_global_state.h"
#include <boost/filesystem/operations.hpp>
+#include <memory>
#ifndef _WIN32
+#include <syslog.h>
#include <sys/types.h>
#include <sys/wait.h>
#endif
+#include "mongo/base/init.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/security_key.h"
#include "mongo/db/cmdline.h"
+#include "mongo/logger/logger.h"
+#include "mongo/logger/message_event.h"
+#include "mongo/logger/message_event_utf8_encoder.h"
+#include "mongo/logger/rotatable_file_appender.h"
+#include "mongo/logger/rotatable_file_manager.h"
+#include "mongo/logger/rotatable_file_writer.h"
+#include "mongo/logger/syslog_appender.h"
#include "mongo/platform/process_id.h"
#include "mongo/util/log.h"
#include "mongo/util/net/listen.h"
@@ -132,14 +142,13 @@ namespace mongo {
// this is run in the final child process (the server)
- // stdout handled in initLogging
- //fclose(stdout);
- //freopen("/dev/null", "w", stdout);
-
- fclose(stderr);
- fclose(stdin);
+ FILE* f = freopen("/dev/null", "w", stdout);
+ if ( f == NULL ) {
+ cout << "Cant reassign stdout while forking server process: " << strerror(errno) << endl;
+ return false;
+ }
- FILE* f = freopen("/dev/null", "w", stderr);
+ f = freopen("/dev/null", "w", stderr);
if ( f == NULL ) {
cout << "Cant reassign stderr while forking server process: " << strerror(errno) << endl;
return false;
@@ -160,32 +169,74 @@ namespace mongo {
_exit(EXIT_FAILURE);
}
- bool initializeServerGlobalState() {
+ MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
+ ("GlobalLogManager", "globalVariablesConfigured"),
+ ("default"))(
+ InitializerContext*) {
- Listener::globalTicketHolder.resize( cmdLine.maxConns );
+ using logger::LogManager;
+ using logger::MessageEventEphemeral;
+ using logger::MessageEventDetailsEncoder;
+ using logger::MessageEventWithContextEncoder;
+ using logger::MessageLogDomain;
+ using logger::RotatableFileAppender;
+ using logger::StatusWithRotatableFileWriter;
#ifndef _WIN32
- if (!fs::is_directory(cmdLine.socket)) {
- cout << cmdLine.socket << " must be a directory" << endl;
- return false;
- }
+ using logger::SyslogAppender;
if (cmdLine.logWithSyslog) {
StringBuilder sb;
sb << cmdLine.binaryName << "." << cmdLine.port;
- Logstream::useSyslog( sb.str().c_str() );
+ openlog(strdup(sb.str().c_str()), LOG_PID | LOG_CONS, LOG_USER);
+ LogManager* manager = logger::globalLogManager();
+ manager->getGlobalDomain()->clearAppenders();
+ manager->getGlobalDomain()->attachAppender(
+ MessageLogDomain::AppenderAutoPtr(
+ new SyslogAppender<MessageEventEphemeral>(
+ new logger::MessageEventWithContextEncoder)));
+ manager->getNamedDomain("javascriptOutput")->attachAppender(
+ MessageLogDomain::AppenderAutoPtr(
+ new SyslogAppender<MessageEventEphemeral>(
+ new logger::MessageEventWithContextEncoder)));
}
-#endif
+#endif // defined(_WIN32)
+
if (!cmdLine.logpath.empty()) {
fassert(16448, !cmdLine.logWithSyslog);
string absoluteLogpath = boost::filesystem::absolute(
cmdLine.logpath, cmdLine.cwd).string();
- if (!initLogging(absoluteLogpath, cmdLine.logAppend)) {
- cout << "Bad logpath value: \"" << absoluteLogpath << "\"; terminating." << endl;
- return false;
+ StatusWithRotatableFileWriter writer =
+ logger::globalRotatableFileManager()->openFile(absoluteLogpath, cmdLine.logAppend);
+ if (!writer.isOK()) {
+ return writer.getStatus();
}
+ LogManager* manager = logger::globalLogManager();
+ manager->getGlobalDomain()->clearAppenders();
+ manager->getGlobalDomain()->attachAppender(
+ MessageLogDomain::AppenderAutoPtr(
+ new RotatableFileAppender<MessageEventEphemeral>(
+ new MessageEventDetailsEncoder, writer.getValue())));
+ manager->getNamedDomain("javascriptOutput")->attachAppender(
+ MessageLogDomain::AppenderAutoPtr(
+ new RotatableFileAppender<MessageEventEphemeral>(
+ new MessageEventDetailsEncoder, writer.getValue())));
}
+ return Status::OK();
+ }
+
+ bool initializeServerGlobalState() {
+
+ Listener::globalTicketHolder.resize( cmdLine.maxConns );
+
+#ifndef _WIN32
+ if (!fs::is_directory(cmdLine.socket)) {
+ cout << cmdLine.socket << " must be a directory" << endl;
+ return false;
+ }
+#endif
+
if (!cmdLine.pidFile.empty()) {
writePidFile(cmdLine.pidFile);
}
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index cfcbb0480c4..9b260663b93 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -318,7 +318,7 @@ namespace mongo {
void mongoAbort(const char *msg) {
if( reportEventToSystem )
reportEventToSystem(msg);
- rawOut(msg);
+ severe() << msg;
::abort();
}
@@ -406,7 +406,7 @@ namespace mongo {
debug.op = op;
long long logThreshold = cmdLine.slowMS;
- bool shouldLog = logLevel >= 1;
+ bool shouldLog = logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1));
if ( op == dbQuery ) {
if ( handlePossibleShardedMessage( m , &dbresponse ) )
@@ -466,12 +466,12 @@ namespace mongo {
}
}
catch ( UserException& ue ) {
- tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing "
+ MONGO_TLOG(3) << " Caught Assertion in " << opToString(op) << ", continuing "
<< ue.toString() << endl;
debug.exceptionInfo = ue.getInfo();
}
catch ( AssertionException& e ) {
- tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing "
+ MONGO_TLOG(3) << " Caught Assertion in " << opToString(op) << ", continuing "
<< e.toString() << endl;
debug.exceptionInfo = e.getInfo();
shouldLog = true;
@@ -484,7 +484,7 @@ namespace mongo {
logThreshold += currentOp.getExpectedLatencyMs();
if ( shouldLog || debug.executionTime > logThreshold ) {
- mongo::tlog() << debug.report( currentOp ) << endl;
+ MONGO_TLOG(0) << debug.report( currentOp ) << endl;
}
if ( currentOp.shouldDBProfile( debug.executionTime ) ) {
@@ -514,13 +514,13 @@ namespace mongo {
uassert( 13004 , str::stream() << "sent negative cursors to kill: " << n , n >= 1 );
if ( n > 2000 ) {
- LOG( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
+ ( n < 30000 ? warning() : error() ) << "receivedKillCursors, n=" << n << endl;
verify( n < 30000 );
}
int found = ClientCursor::eraseIfAuthorized(n, (long long *) x);
- if ( logLevel > 0 || found != n ) {
+ if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1)) || found != n ) {
LOG( found == n ? 1 : 0 ) << "killcursors: found " << found << " of " << n << endl;
}
diff --git a/src/mongo/db/jsobj.cpp b/src/mongo/db/jsobj.cpp
index 55c484720ed..bc488d07aea 100644
--- a/src/mongo/db/jsobj.cpp
+++ b/src/mongo/db/jsobj.cpp
@@ -922,13 +922,14 @@ namespace mongo {
}
void BSONObj::dump() const {
- out() << hex;
+ LogstreamBuilder builder = out();
+ builder << hex;
const char *p = objdata();
for ( int i = 0; i < objsize(); i++ ) {
- out() << i << '\t' << ( 0xff & ( (unsigned) *p ) );
+ builder << i << '\t' << ( 0xff & ( (unsigned) *p ) );
if ( *p >= 'A' && *p <= 'z' )
- out() << '\t' << *p;
- out() << endl;
+ builder << '\t' << *p;
+ builder << endl;
p++;
}
}
diff --git a/src/mongo/db/ops/query.cpp b/src/mongo/db/ops/query.cpp
index b7156d305ae..9f6822fd51a 100644
--- a/src/mongo/db/ops/query.cpp
+++ b/src/mongo/db/ops/query.cpp
@@ -800,7 +800,7 @@ namespace mongo {
ccPointer.reset( new ClientCursor( queryOptions, cursor, ns,
jsobj.getOwned() ) );
cursorid = ccPointer->cursorid();
- DEV tlog(2) << "query has more, cursorid: " << cursorid << endl;
+ DEV { MONGO_TLOG(2) << "query has more, cursorid: " << cursorid << endl; }
if ( cursor->supportYields() ) {
ClientCursor::YieldData data;
ccPointer->prepareToYield( data );
@@ -815,7 +815,10 @@ namespace mongo {
}
if ( !ccPointer->ok() && ccPointer->c()->tailable() ) {
- DEV tlog() << "query has no more but tailable, cursorid: " << cursorid << endl;
+ DEV {
+ MONGO_TLOG(0) << "query has no more but tailable, cursorid: " << cursorid <<
+ endl;
+ }
}
if( queryOptions & QueryOption_Exhaust ) {
@@ -941,8 +944,7 @@ namespace mongo {
uassert( 16332 , "can't have an empty ns" , ns[0] );
- if( logLevel >= 2 )
- log() << "runQuery called " << ns << " " << jsobj << endl;
+ LOG(2) << "runQuery called " << ns << " " << jsobj << endl;
curop.debug().ns = ns;
curop.debug().ntoreturn = pq.getNumToReturn();
diff --git a/src/mongo/db/pagefault.cpp b/src/mongo/db/pagefault.cpp
index 74d4e486d53..3a738e55ecc 100644
--- a/src/mongo/db/pagefault.cpp
+++ b/src/mongo/db/pagefault.cpp
@@ -45,7 +45,7 @@ namespace mongo {
if( LockMongoFilesShared::getEra() != era ) {
// files opened and closed. we don't try to handle but just bail out; this is much simpler
// and less error prone and saves us from taking a dbmutex readlock.
- dlog(2) << "era changed" << endl;
+ MONGO_DLOG(2) << "era changed" << endl;
return;
}
r->touch();
@@ -59,7 +59,7 @@ namespace mongo {
verify( cc()._pageFaultRetryableSection == 0 );
if( Lock::isLocked() ) {
cc()._pageFaultRetryableSection = 0;
- if( debug || logLevel > 2 ) {
+ if( debug || logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(3)) ) {
LOGSOME << "info PageFaultRetryableSection will not yield, already locked upon reaching" << endl;
}
}
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index db6d1351308..e6ba5e9d104 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -399,7 +399,7 @@ namespace mongo {
if( !boost::filesystem::exists(filename) )
return false;
if( !mmf.open(filename,false) ) {
- dlog(2) << "info couldn't open " << filename << " probably end of datafile list" << endl;
+ MONGO_DLOG(2) << "info couldn't open " << filename << " probably end of datafile list" << endl;
return false;
}
_mb = mmf.getView(); verify(_mb);
@@ -518,8 +518,11 @@ namespace mongo {
addNewExtentToNamespace(ns, e, loc, emptyLoc, newCapped);
- DEV tlog(1) << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x" << hex << offset
- << " emptyLoc:" << hex << emptyLoc.getOfs() << dec << endl;
+ DEV {
+ MONGO_TLOG(1) << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x"
+ << hex << offset << " emptyLoc:" << hex << emptyLoc.getOfs() << dec
+ << endl;
+ }
return e;
}
@@ -586,7 +589,7 @@ namespace mongo {
}
}
- if( n > 128 ) LOG( n < 512 ? 1 : 0 ) << "warning: newExtent " << n << " scanned\n";
+ if( n > 128 ) { LOG( n < 512 ? 1 : 0 ) << "warning: newExtent " << n << " scanned\n"; }
if( best ) {
Extent *e = best;
@@ -1953,8 +1956,9 @@ namespace mongo {
q = p / (c+"ns");
bool ok = false;
MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply( q ) );
- if ( ok )
+ if ( ok ) {
LOG(2) << fo.op() << " file " << q.string() << endl;
+ }
int i = 0;
int extra = 10; // should not be necessary, this is defensive in case there are missing files
while ( 1 ) {
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 5cfb53d148b..d6dcb22732e 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -120,11 +120,11 @@ namespace mongo {
/* for debugging purposes, show what the query and sort are */
DEV {
(log() << "\n---- query BSON\n" <<
- queryObj.jsonString(Strict, 1) << "\n----\n").flush();
+ queryObj.jsonString(Strict, 1) << "\n----\n");
(log() << "\n---- sort BSON\n" <<
- sortObj.jsonString(Strict, 1) << "\n----\n").flush();
+ sortObj.jsonString(Strict, 1) << "\n----\n");
(log() << "\n---- fullName\n" <<
- fullName << "\n----\n").flush();
+ fullName << "\n----\n");
}
// Create the necessary context to use a Cursor, including taking a namespace read lock,
diff --git a/src/mongo/db/query_optimizer_internal.cpp b/src/mongo/db/query_optimizer_internal.cpp
index 3191772ec51..6d21d03bec6 100644
--- a/src/mongo/db/query_optimizer_internal.cpp
+++ b/src/mongo/db/query_optimizer_internal.cpp
@@ -946,8 +946,9 @@ namespace mongo {
shared_ptr<QueryPlanRunner> QueryPlanRunnerQueue::init() {
massert( 10369, "no plans", _plans.plans().size() > 0 );
- if ( _plans.plans().size() > 1 )
+ if ( _plans.plans().size() > 1 ) {
LOG(1) << " running multiple plans" << endl;
+ }
for( QueryPlanSet::PlanVector::const_iterator i = _plans.plans().begin();
i != _plans.plans().end(); ++i ) {
shared_ptr<QueryPlanRunner> runner( _prototypeRunner.createChild() );
diff --git a/src/mongo/db/record.cpp b/src/mongo/db/record.cpp
index d55d6d40f68..46c36537e42 100644
--- a/src/mongo/db/record.cpp
+++ b/src/mongo/db/record.cpp
@@ -190,7 +190,7 @@ namespace mongo {
if ( rarely_count++ % ( 2048 / BigHashSize ) == 0 ) {
long long now = Listener::getElapsedTimeMillis();
RARELY if ( now == 0 ) {
- tlog() << "warning Listener::getElapsedTimeMillis returning 0ms" << endl;
+ MONGO_TLOG(0) << "warning Listener::getElapsedTimeMillis returning 0ms" << endl;
}
if ( now - _lastRotate > ( 1000 * RotateTimeSecs ) ) {
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index cb4a4ce3388..9fd6cb721f8 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -527,8 +527,7 @@ namespace mongo {
@param alreadyLocked caller already put us in write lock if true
*/
void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op, bool alreadyLocked) {
- if( logLevel >= 6 ) // op.tostring is expensive so doing this check explicitly
- LOG(6) << "processing op: " << op << endl;
+ LOG(6) << "processing op: " << op << endl;
if( op.getStringField("op")[0] == 'n' )
return;
@@ -609,8 +608,7 @@ namespace mongo {
bool empty = ctx.db()->isEmpty();
bool incompleteClone = incompleteCloneDbs.count( clientName ) != 0;
- if( logLevel >= 6 )
- LOG(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
+ LOG(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
// always apply admin command command
// this is a bit hacky -- the semantics of replication/commands aren't well specified
@@ -844,17 +842,14 @@ namespace mongo {
nextOpTime = OpTime(); // will reread the op below
}
else if ( nextOpTime != syncedTo ) { // didn't get what we queried for - error
- Nullstream& l = log();
- l << "repl: nextOpTime " << nextOpTime.toStringLong() << ' ';
- if ( nextOpTime < syncedTo )
- l << "<??";
- else
- l << ">";
-
- l << " syncedTo " << syncedTo.toStringLong() << '\n';
- log() << "repl: time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n";
- log() << "repl: tailing: " << tailing << '\n';
- log() << "repl: data too stale, halting replication" << endl;
+ log()
+ << "repl: nextOpTime " << nextOpTime.toStringLong() << ' '
+ << ((nextOpTime < syncedTo) ? "<??" : ">")
+ << " syncedTo " << syncedTo.toStringLong() << '\n'
+ << "repl: time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs())
+ << "sec\n"
+ << "repl: tailing: " << tailing << '\n'
+ << "repl: data too stale, halting replication" << endl;
replInfo = replAllDead = "data too stale halted replication";
verify( syncedTo < nextOpTime );
throw SyncException();
@@ -977,7 +972,7 @@ namespace mongo {
_sleepAdviceTime = 0;
ReplInfo r("sync");
if ( !cmdLine.quiet ) {
- Nullstream& l = log();
+ LogstreamBuilder l = log();
l << "repl: syncing from ";
if( sourceName() != "main" ) {
l << "source:" << sourceName() << ' ';
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 9917f10b982..1697641ace7 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -229,9 +229,7 @@ namespace mongo {
append_O_Obj(r->data(), partial, obj);
- if ( logLevel >= 6 ) {
- LOG( 6 ) << "logOp:" << BSONObj::make(r) << endl;
- }
+ LOG( 6 ) << "logOp:" << BSONObj::make(r) << endl;
}
static void _logOpOld(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, bool fromMigrate ) {
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
index 7290e24e7d6..e0c58423611 100644
--- a/src/mongo/db/repl/rs.cpp
+++ b/src/mongo/db/repl/rs.cpp
@@ -62,7 +62,7 @@ namespace mongo {
void replset::sethbmsg(const string& s, const int level) {
if (theReplSet) {
- theReplSet->sethbmsg(s, logLevel);
+ theReplSet->sethbmsg(s, level);
}
}
@@ -686,7 +686,7 @@ namespace mongo {
int n = 0;
for( vector<ReplSetConfig*>::iterator i = cfgs.begin(); i != cfgs.end(); i++ ) {
ReplSetConfig* cfg = *i;
- DEV LOG(1) << n+1 << " config shows version " << cfg->version << rsLog;
+ DEV { LOG(1) << n+1 << " config shows version " << cfg->version << rsLog; }
if( ++n == 1 ) myVersion = cfg->version;
if( cfg->ok() && cfg->version > v ) {
highest = cfg;
diff --git a/src/mongo/db/server_extra_log_context.cpp b/src/mongo/db/server_extra_log_context.cpp
index b183c202751..86c446505ca 100644
--- a/src/mongo/db/server_extra_log_context.cpp
+++ b/src/mongo/db/server_extra_log_context.cpp
@@ -61,7 +61,7 @@ namespace {
if (!logUserIds)
return Status::OK();
- return Logstream::registerExtraLogContextFn(appendServerExtraLogContext);
+ return logger::registerExtraLogContextFn(appendServerExtraLogContext);
}
} // namespace
diff --git a/src/mongo/db/storage/namespace_index.cpp b/src/mongo/db/storage/namespace_index.cpp
index 715300d6ef1..07cb3767161 100644
--- a/src/mongo/db/storage/namespace_index.cpp
+++ b/src/mongo/db/storage/namespace_index.cpp
@@ -70,7 +70,7 @@ namespace mongo {
_ht->kill(extra);
}
catch(DBException&) {
- dlog(3) << "caught exception in kill_ns" << endl;
+ MONGO_DLOG(3) << "caught exception in kill_ns" << endl;
}
}
}