author     Thomas Schubert <thomas.schubert@mongodb.com>   2016-08-29 14:11:18 -0400
committer  Thomas Schubert <thomas.schubert@mongodb.com>   2016-09-02 13:24:58 -0400
commit     710a443a980b88a36d8c4ef0ea3cc0d375681908
tree       74c82c605c9846cf14072162596d6d9c327328c5 /src/mongo
parent     49b654c70e3df23d8fbba47d63a88a06c3d51f83
SERVER-24991 log redaction for commands, concurrency, exec, index
Diffstat (limited to 'src/mongo')
43 files changed, 152 insertions, 177 deletions
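The change applies one mechanical pattern throughout: values that may carry user data (queries, documents, exception text) are passed through redact() before being streamed into a log line, and redundant << endl terminators are dropped. The sketch below is a minimal standalone illustration of the redact() call pattern, not MongoDB's implementation; the real redact() overloads live in src/mongo/util/log.h, and the gRedactLogs flag here is a hypothetical stand-in for the server's actual redaction toggle.

// Minimal sketch of the redaction pattern (assumption: illustrative stand-in,
// not MongoDB's implementation).
#include <iostream>
#include <string>

namespace sketch {

bool gRedactLogs = false;  // hypothetical stand-in for the server's redaction setting

// Potentially user-supplied values are passed through redact() before being
// streamed into a log line; with redaction enabled the value is masked.
std::string redact(const std::string& userData) {
    return gRedactLogs ? "###" : userData;
}

}  // namespace sketch

int main() {
    using sketch::redact;
    std::string query = "{ ssn: \"123-45-6789\" }";  // hypothetical user query

    sketch::gRedactLogs = false;
    std::cout << "cloneCollection query: " << redact(query) << '\n';  // prints the query

    sketch::gRedactLogs = true;
    std::cout << "cloneCollection query: " << redact(query) << '\n';  // prints ###
    return 0;
}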
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 3cc202bcae8..a7cf1e04017 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -142,8 +142,8 @@ public:
        bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;

        log() << "cloneCollection. db:" << dbname << " collection:" << collection
-              << " from: " << fromhost << " query: " << query << " "
-              << (copyIndexes ? "" : ", not copying indexes") << endl;
+              << " from: " << fromhost << " query: " << redact(query) << " "
+              << (copyIndexes ? "" : ", not copying indexes");

        Cloner cloner;
        unique_ptr<DBClientConnection> myconn;
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 9be54eb39ab..b53fcda0b65 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -165,7 +165,7 @@ public:
        OldClientContext ctx(txn, nss.ns());
        BackgroundOperation::assertNoBgOpInProgForNs(nss.ns());

-        log() << "compact " << nss.ns() << " begin, options: " << compactOptions.toString();
+        log() << "compact " << nss.ns() << " begin, options: " << compactOptions;

        StatusWith<CompactStats> status = collection->compact(txn, &compactOptions);
        if (!status.isOK())
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 0f18c9ad943..0bbdfb5e1cf 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -440,7 +440,7 @@ public:
        int was = _diaglog.setLevel(cmdObj.firstElement().numberInt());
        _diaglog.flush();
        if (!serverGlobalParams.quiet) {
-            LOG(0) << "CMD: diagLogging set to " << _diaglog.getLevel() << " from: " << was << endl;
+            LOG(0) << "CMD: diagLogging set to " << _diaglog.getLevel() << " from: " << was;
        }
        result.append("was", was);
        result.append("note", deprecationWarning);
@@ -650,7 +650,7 @@ public:
                if (partialOk) {
                    break;  // skipped chunk is probably on another shard
                }
-                log() << "should have chunk: " << n << " have:" << myn << endl;
+                log() << "should have chunk: " << n << " have:" << myn;
                dumpChunks(txn, ns, query, sort);
                uassert(10040, "chunks out of order", n == myn);
            }
@@ -852,7 +852,7 @@ public:
        }

        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
-            warning() << "Internal error while reading " << ns << endl;
+            warning() << "Internal error while reading " << ns;
            return appendCommandStatus(
                result,
                Status(ErrorCodes::OperationFailed,
@@ -1398,7 +1398,7 @@ bool Command::run(OperationContext* txn,
                serverGlobalParams.clusterRole == ClusterRole::ConfigServer ? 0 : 2;
            LOG(debugLevel) << "Command on database " << db
                            << " timed out waiting for read concern to be satisfied. Command: "
-                            << getRedactedCopyForLogging(request.getCommandArgs());
+                            << redact(getRedactedCopyForLogging(request.getCommandArgs()));
        }

        auto result = appendCommandStatus(inPlaceReplyBob, rcStatus);
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index e571bfee3d3..d1658402a63 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -246,8 +246,8 @@ public:
        // Return an error if execution fails for any reason.
        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
            log() << "Plan executor error during distinct command: "
-                  << PlanExecutor::statestr(state)
-                  << ", stats: " << Explain::getWinningPlanStats(executor.getValue().get());
+                  << redact(PlanExecutor::statestr(state))
+                  << ", stats: " << redact(Explain::getWinningPlanStats(executor.getValue().get()));
            return appendCommandStatus(result,
                                       Status(ErrorCodes::OperationFailed,
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index da54c84968b..347e3762d9e 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -126,7 +126,7 @@ public:
        const NamespaceString toReIndexNs = parseNsCollectionRequired(dbname, jsobj);

-        LOG(0) << "CMD: reIndex " << toReIndexNs << endl;
+        LOG(0) << "CMD: reIndex " << toReIndexNs;

        ScopedTransaction transaction(txn, MODE_IX);
        Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
diff --git a/src/mongo/db/commands/eval.cpp b/src/mongo/db/commands/eval.cpp
index 055c3a2dc68..8e822717b19 100644
--- a/src/mongo/db/commands/eval.cpp
+++ b/src/mongo/db/commands/eval.cpp
@@ -112,8 +112,8 @@ bool dbEval(OperationContext* txn,
        if (argsElement.type() == Array) {
            args = argsElement.embeddedObject();
            if (edebug) {
-                log() << "args:" << args.toString() << endl;
-                log() << "code:\n" << code << endl;
+                log() << "args:" << args;
+                log() << "code:\n" << redact(code);
            }
        }
    }
@@ -124,11 +124,11 @@ bool dbEval(OperationContext* txn,
        res = s->invoke(f, &args, 0, 0);
        int m = t.millis();
        if (m > serverGlobalParams.slowMS) {
-            log() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
+            log() << "dbeval slow, time: " << dec << m << "ms " << dbName;
            if (m >= 1000)
-                log() << code << endl;
+                log() << redact(code);
            else
-                OCCASIONALLY log() << code << endl;
+                OCCASIONALLY log() << redact(code);
        }
    }
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 5282fa72250..b868d37e323 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -117,7 +117,7 @@ StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* txn,
    if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
        error() << "Plan executor error during findAndModify: " << PlanExecutor::statestr(state)
-                << ", stats: " << Explain::getWinningPlanStats(exec);
+                << ", stats: " << redact(Explain::getWinningPlanStats(exec));

        if (WorkingSetCommon::isValidStatusMemberObject(value)) {
            const Status errorStatus = WorkingSetCommon::getMemberObjectStatus(value);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index c83080a2d70..50eb829b1f3 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -348,7 +348,7 @@ public:
        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
            firstBatch.abandon();
            error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
-                    << ", stats: " << Explain::getWinningPlanStats(exec.get());
+                    << ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
            return appendCommandStatus(result,
                                       Status(ErrorCodes::OperationFailed,
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 2536a2f6f8e..bf91bd0cc4b 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -77,7 +77,7 @@ public:
        try {
            doRealWork();
        } catch (std::exception& e) {
-            error() << "FSyncLockThread exception: " << e.what() << endl;
+            error() << "FSyncLockThread exception: " << e.what();
        }
    }
};
@@ -135,7 +135,7 @@ public:
        bool sync = !cmdObj["async"].trueValue();  // async means do an fsync, but return immediately
        bool lock = cmdObj["lock"].trueValue();
-        log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
+        log() << "CMD fsync: sync:" << sync << " lock:" << lock;
        if (lock) {
            if (!sync) {
                errmsg = "fsync: sync option must be true when using lock";
@@ -153,8 +153,8 @@ public:
            if (!status.isOK())
                return appendCommandStatus(result, status);

-            log() << "db is now locked, no writes allowed. db.fsyncUnlock() to unlock" << endl;
-            log() << " For more info see " << FSyncCommand::url() << endl;
+            log() << "db is now locked, no writes allowed. db.fsyncUnlock() to unlock";
+            log() << " For more info see " << FSyncCommand::url();
            result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock");
            result.append("seeAlso", FSyncCommand::url());
@@ -243,7 +243,7 @@ void FSyncLockThread::doRealWork() {
        try {
            getDur().syncDataAndTruncateJournal(&txn);
        } catch (std::exception& e) {
-            error() << "error doing syncDataAndTruncateJournal: " << e.what() << endl;
+            error() << "error doing syncDataAndTruncateJournal: " << e.what();
            fsyncCmd.status = Status(ErrorCodes::CommandFailed, e.what());
            fsyncCmd._threadSync.notify_one();
            fsyncCmd.locked = false;
@@ -255,7 +255,7 @@ void FSyncLockThread::doRealWork() {
            try {
                storageEngine->flushAllFiles(true);
            } catch (std::exception& e) {
-                error() << "error doing flushAll: " << e.what() << endl;
+                error() << "error doing flushAll: " << e.what();
                fsyncCmd.status = Status(ErrorCodes::CommandFailed, e.what());
                fsyncCmd._threadSync.notify_one();
                fsyncCmd.locked = false;
@@ -267,7 +267,7 @@ void FSyncLockThread::doRealWork() {
            }
            MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&txn, "beginBackup", "global");
        } catch (const DBException& e) {
-            error() << "storage engine unable to begin backup : " << e.toString() << endl;
+            error() << "storage engine unable to begin backup : " << e.toString();
            fsyncCmd.status = e.toStatus();
            fsyncCmd._threadSync.notify_one();
            fsyncCmd.locked = false;
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 5f0433576bb..bccffe0615b 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -271,7 +271,7 @@ public:
            // Don't make a too-big result object.
            if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
-                warning() << "Too many geoNear results for query " << rewritten.toString()
+                warning() << "Too many geoNear results for query " << redact(rewritten)
                          << ", truncating output.";
                break;
            }
@@ -299,7 +299,7 @@ public:
        // Return an error if execution fails for any reason.
        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
            log() << "Plan executor error during geoNear command: "
                  << PlanExecutor::statestr(state)
-                  << ", stats: " << Explain::getWinningPlanStats(exec.get());
+                  << ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));
            return appendCommandStatus(result,
                                       Status(ErrorCodes::OperationFailed,
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index cbcf44f19d0..1bdef2fc8f1 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -543,7 +543,7 @@ public:
            nextBatch->abandon();

            error() << "GetMore command executor error: " << PlanExecutor::statestr(*state)
-                    << ", stats: " << Explain::getWinningPlanStats(exec);
+                    << ", stats: " << redact(Explain::getWinningPlanStats(exec));

            return Status(ErrorCodes::OperationFailed,
                          str::stream() << "GetMore command executor error: "
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 5a8b98f4390..ff315323a73 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -269,7 +269,7 @@ Status ClearFilters::clear(OperationContext* txn,
        // Remove entry from plan cache
        planCache->remove(*cq);

-        LOG(0) << "Removed index filter on " << ns << " " << cq->toStringShort();
+        LOG(0) << "Removed index filter on " << ns << " " << redact(cq->toStringShort());

        return Status::OK();
    }
@@ -396,7 +396,8 @@ Status SetFilter::set(OperationContext* txn,
        // Remove entry from plan cache.
        planCache->remove(*cq);

-        LOG(0) << "Index filter set on " << ns << " " << cq->toStringShort() << " " << indexesElt;
+        LOG(0) << "Index filter set on " << ns << " " << redact(cq->toStringShort()) << " "
+               << indexesElt;

        return Status::OK();
    }
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 26b40571fd0..0cd70f658bc 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -792,7 +792,7 @@ State::~State() {
        try {
            dropTempCollections();
        } catch (std::exception& e) {
-            error() << "couldn't cleanup after map reduce: " << e.what() << endl;
+            error() << "couldn't cleanup after map reduce: " << e.what();
        }
    }
    if (_scope && !_scope->isKillPending() && _scope->getError().empty()) {
@@ -803,7 +803,7 @@ State::~State() {
            _scope->invoke(cleanup, 0, 0, 0, true);
        } catch (const DBException&) {
            // not important because properties will be reset if scope is reused
-            LOG(1) << "MapReduce terminated during state destruction" << endl;
+            LOG(1) << "MapReduce terminated during state destruction";
        }
    }
}
@@ -946,7 +946,7 @@ void State::switchMode(bool jsMode) {
}

void State::bailFromJS() {
-    LOG(1) << "M/R: Switching from JS mode to mixed mode" << endl;
+    LOG(1) << "M/R: Switching from JS mode to mixed mode";

    // reduce and reemit into c++
    switchMode(false);
@@ -1232,7 +1232,7 @@ void State::reduceAndSpillInMemoryStateIfNeeded() {
                _scope->invoke(_reduceAll, 0, 0, 0, true);
                LOG(3) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt
                       << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis()
-                       << "ms" << endl;
+                       << "ms";
                return;
            }
        }
@@ -1246,12 +1246,12 @@ void State::reduceAndSpillInMemoryStateIfNeeded() {
                Timer t;
                reduceInMemory();
                LOG(3) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount
-                       << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
+                       << " newSize=" << _size << " time=" << t.millis() << "ms";

                // if size is still high, or values are not reducing well, dump
                if (_onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2)) {
                    dumpToInc();
-                    LOG(3) << " MR - dumping to db" << endl;
+                    LOG(3) << " MR - dumping to db";
                }
            }
        }
@@ -1354,7 +1354,7 @@ public:

        Config config(dbname, cmd);

-        LOG(1) << "mr ns: " << config.ns << endl;
+        LOG(1) << "mr ns: " << config.ns;

        uassert(16149, "cannot run map reduce without the js engine", globalScriptEngine);
@@ -1616,19 +1616,19 @@ public:
                    return false;
                }
            } catch (SendStaleConfigException& e) {
-                log() << "mr detected stale config, should retry" << causedBy(e) << endl;
+                log() << "mr detected stale config, should retry" << redact(e);
                throw e;
            }
            // TODO: The error handling code for queries is v. fragile,
            // *requires* rethrow AssertionExceptions - should probably fix.
            catch (AssertionException& e) {
-                log() << "mr failed, removing collection" << causedBy(e) << endl;
+                log() << "mr failed, removing collection" << redact(e);
                throw e;
            } catch (std::exception& e) {
-                log() << "mr failed, removing collection" << causedBy(e) << endl;
+                log() << "mr failed, removing collection" << causedBy(e);
                throw e;
            } catch (...) {
-                log() << "mr failed for unknown reason, removing collection" << endl;
+                log() << "mr failed for unknown reason, removing collection";
                throw;
            }
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index af46c6f1d80..69f8a1ecccb 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -259,8 +259,7 @@ boost::intrusive_ptr<Pipeline> reparsePipeline(
    if (!reparsedPipeline.isOK()) {
        error() << "Aggregation command did not round trip through parsing and serialization "
                   "correctly. Input pipeline: "
-                << Value(request.getPipeline()).toString()
-                << ", serialized pipeline: " << Value(serialized).toString();
+                << Value(request.getPipeline()) << ", serialized pipeline: " << Value(serialized);
        fassertFailedWithStatusNoTrace(40175, reparsedPipeline.getStatus());
    }
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 0fb1a5b1d57..25b76b6fbe9 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -311,7 +311,7 @@ Status PlanCacheClear::clear(OperationContext* txn,
        if (!planCache->contains(*cq)) {
            // Log if asked to clear non-existent query shape.
            LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
-                   << cq->getQueryObj().toString() << "(sort: " << cq->getQueryRequest().getSort()
+                   << redact(cq->getQueryObj()) << "(sort: " << cq->getQueryRequest().getSort()
                   << "; projection: " << cq->getQueryRequest().getProj()
                   << "; collation: " << cq->getQueryRequest().getCollation() << ")";
            return Status::OK();
@@ -322,7 +322,7 @@ Status PlanCacheClear::clear(OperationContext* txn,
        return result;
    }

-    LOG(1) << ns << ": removed plan cache entry - " << cq->getQueryObj().toString()
+    LOG(1) << ns << ": removed plan cache entry - " << redact(cq->getQueryObj())
           << "(sort: " << cq->getQueryRequest().getSort()
           << "; projection: " << cq->getQueryRequest().getProj()
           << "; collation: " << cq->getQueryRequest().getCollation() << ")";
diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp
index dbbf5582323..67716cf7a14 100644
--- a/src/mongo/db/commands/server_status.cpp
+++ b/src/mongo/db/commands/server_status.cpp
@@ -167,7 +167,7 @@ public:
        timeBuilder.appendNumber("at end", durationCount<Milliseconds>(runElapsed));
        if (runElapsed > Milliseconds(1000)) {
            BSONObj t = timeBuilder.obj();
-            log() << "serverStatus was very slow: " << t << endl;
+            log() << "serverStatus was very slow: " << t;
            result.append("timing", t);
        }
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 12ae5131616..47ecb862d60 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -81,7 +81,7 @@ public:
             string& errmsg,
             BSONObjBuilder& result) {
        string coll = cmdObj["godinsert"].valuestrsafe();
-        log() << "test only command godinsert invoked coll:" << coll << endl;
+        log() << "test only command godinsert invoked coll:" << coll;
        uassert(13049, "godinsert must specify a collection", !coll.empty());
        string ns = dbname + "." + coll;
        BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
@@ -160,7 +160,7 @@ public:
             int,
             string& errmsg,
             BSONObjBuilder& result) {
-        log() << "test only command sleep invoked" << endl;
+        log() << "test only command sleep invoked";
        long long millis = 0;

        if (cmdObj["secs"] || cmdObj["millis"]) {
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 2238383d960..797ea21fe4e 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -2468,7 +2468,7 @@ public:
                if (!status.isOK()) {
                    // Match the behavior of mongorestore to continue on failure
                    warning() << "Could not update user " << userName
-                              << " in _mergeAuthzCollections command: " << status << endl;
+                              << " in _mergeAuthzCollections command: " << redact(status);
                }
            } else {
                auditCreateOrUpdateUser(userObj, true);
@@ -2476,7 +2476,7 @@ public:
                if (!status.isOK()) {
                    // Match the behavior of mongorestore to continue on failure
                    warning() << "Could not insert user " << userName
-                              << " in _mergeAuthzCollections command: " << status << endl;
+                              << " in _mergeAuthzCollections command: " << redact(status);
                }
            }
            usersToDrop->erase(userName);
@@ -2506,7 +2506,7 @@ public:
                if (!status.isOK()) {
                    // Match the behavior of mongorestore to continue on failure
                    warning() << "Could not update role " << roleName
-                              << " in _mergeAuthzCollections command: " << status << endl;
+                              << " in _mergeAuthzCollections command: " << redact(status);
                }
            } else {
                auditCreateOrUpdateRole(roleObj, true);
@@ -2514,7 +2514,7 @@ public:
                if (!status.isOK()) {
                    // Match the behavior of mongorestore to continue on failure
                    warning() << "Could not insert role " << roleName
-                              << " in _mergeAuthzCollections command: " << status << endl;
+                              << " in _mergeAuthzCollections command: " << redact(status);
                }
            }
            rolesToDrop->erase(roleName);
@@ -2733,7 +2733,7 @@ public:
     * Logs that the auth schema upgrade failed because of "status" and returns "status".
     */
    Status logUpgradeFailed(const Status& status) {
-        log() << "Auth schema upgrade failed with " << status;
+        log() << "Auth schema upgrade failed with " << redact(status);
        return status;
    }
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 0119e0db238..f1d5ff4a349 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -107,7 +107,7 @@ public:
        }

        if (!serverGlobalParams.quiet) {
-            LOG(0) << "CMD: validate " << ns << endl;
+            LOG(0) << "CMD: validate " << ns;
        }

        AutoGetDb ctx(txn, ns_string.db(), MODE_IX);
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 4eaa4799fee..dd5e246e3c1 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -215,7 +215,7 @@ void LockerImpl<IsForMMAPV1>::dump() const {
    }
    _lock.unlock();

-    log() << ss.str() << std::endl;
+    log() << ss.str();
}
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 9a07029937c..527847c4df6 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -246,7 +246,7 @@ ProgressMeter& CurOp::setMessage_inlock(const char* msg, int secondsBetween) {
    if (progressMeterTotal) {
        if (_progressMeter.isActive()) {
-            error() << "old _message: " << _message << " new message:" << msg;
+            error() << "old _message: " << redact(_message) << " new message:" << redact(msg);
            verify(!_progressMeter.isActive());
        }
        _progressMeter.reset(progressMeterTotal, secondsBetween);
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 1020d5d1e89..35b1250a81a 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -355,7 +355,7 @@ void checkForCappedOplog(OperationContext* txn, Database* db) {
}

void repairDatabasesAndCheckVersion(OperationContext* txn) {
-    LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;
+    LOG(1) << "enter repairDatabases (to check pdfile version #)";

    ScopedTransaction transaction(txn, MODE_X);
    Lock::GlobalWrite lk(txn->lockState());
@@ -370,7 +370,7 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
        invariant(!storageGlobalParams.readOnly);
        for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
            const string dbName = *i;
-            LOG(1) << " Repairing database: " << dbName << endl;
+            LOG(1) << " Repairing database: " << dbName;

            fassert(18506, repairDatabase(txn, storageEngine, dbName));
        }
@@ -390,7 +390,7 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
    for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
        const string dbName = *i;
-        LOG(1) << " Recovering database: " << dbName << endl;
+        LOG(1) << " Recovering database: " << dbName;

        Database* db = dbHolder().openDb(txn, dbName);
        invariant(db);
@@ -409,7 +409,7 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
            }
            severe() << "Unable to start mongod due to an incompatibility with the data files and"
                        " this version of mongod: "
-                     << status;
+                     << redact(status);
            severe() << "Please consult our documentation when trying to downgrade to a previous"
                        " major release";
            quickExit(EXIT_NEED_UPGRADE);
@@ -462,7 +462,7 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
                    const Status keyStatus = validateKeyPattern(key);
                    if (!keyStatus.isOK()) {
-                        log() << "Problem with index " << index << ": " << keyStatus.reason()
+                        log() << "Problem with index " << index << ": " << redact(keyStatus)
                              << " This index can still be used however it cannot be rebuilt."
<< " For more info see" << " http://dochub.mongodb.org/core/index-validation" << startupWarningsLog; @@ -501,7 +501,7 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) { } } - LOG(1) << "done repairDatabases" << endl; + LOG(1) << "done repairDatabases"; } void _initWireSpec() { @@ -568,7 +568,7 @@ ExitCode _initAndListen(int listenPort) { auto transportLayer = stdx::make_unique<transport::TransportLayerLegacy>(options, sepPtr); auto res = transportLayer->setup(); if (!res.isOK()) { - error() << "Failed to set up listener: " << res.toString(); + error() << "Failed to set up listener: " << res; return EXIT_NET_ERROR; } @@ -671,7 +671,7 @@ ExitCode _initAndListen(int listenPort) { repairDatabasesAndCheckVersion(startupOpCtx.get()); if (storageGlobalParams.upgrade) { - log() << "finished checking dbs" << endl; + log() << "finished checking dbs"; exitCleanly(EXIT_CLEAN); } @@ -697,7 +697,7 @@ ExitCode _initAndListen(int listenPort) { if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) { Status status = authindex::verifySystemIndexes(startupOpCtx.get()); if (!status.isOK()) { - log() << status.reason(); + log() << redact(status); exitCleanly(EXIT_NEED_UPGRADE); } @@ -709,7 +709,7 @@ ExitCode _initAndListen(int listenPort) { log() << "Auth schema version is incompatible: " << "User and role management commands require auth data to have " << "at least schema version " << AuthorizationManager::schemaVersion26Final - << " but startup could not verify schema version: " << status.toString() << endl; + << " but startup could not verify schema version: " << status; exitCleanly(EXIT_NEED_UPGRADE); } if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) { @@ -718,7 +718,7 @@ ExitCode _initAndListen(int listenPort) { << "at least schema version " << AuthorizationManager::schemaVersion26Final << " but found " << foundSchemaVersion << ". In order to upgrade " << "the auth schema, first downgrade MongoDB binaries to version " - << "2.6 and then run the authSchemaUpgrade command." << endl; + << "2.6 and then run the authSchemaUpgrade command."; exitCleanly(EXIT_NEED_UPGRADE); } } else if (globalAuthzManager->isAuthEnabled()) { @@ -810,16 +810,16 @@ ExitCode initAndListen(int listenPort) { try { return _initAndListen(listenPort); } catch (DBException& e) { - log() << "exception in initAndListen: " << e.toString() << ", terminating" << endl; + log() << "exception in initAndListen: " << e.toString() << ", terminating"; return EXIT_UNCAUGHT; } catch (std::exception& e) { log() << "exception in initAndListen std::exception: " << e.what() << ", terminating"; return EXIT_UNCAUGHT; } catch (int& n) { - log() << "exception in initAndListen int: " << n << ", terminating" << endl; + log() << "exception in initAndListen int: " << n << ", terminating"; return EXIT_UNCAUGHT; } catch (...) 
{ - log() << "exception in initAndListen, terminating" << endl; + log() << "exception in initAndListen, terminating"; return EXIT_UNCAUGHT; } } @@ -829,7 +829,7 @@ ExitCode initAndListen(int listenPort) { #if defined(_WIN32) ExitCode initService() { ntservice::reportStatus(SERVICE_RUNNING); - log() << "Service running" << endl; + log() << "Service running"; return initAndListen(serverGlobalParams.port); } #endif diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp index 65f8c801447..fa728c3bdca 100644 --- a/src/mongo/db/dbhelpers.cpp +++ b/src/mongo/db/dbhelpers.cpp @@ -429,8 +429,8 @@ long long Helpers::removeRange(OperationContext* txn, NamespaceString nss(ns); if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) { warning() << "stepped down from primary while deleting chunk; " - << "orphaning data in " << ns << " in range [" << min << ", " << max - << ")"; + << "orphaning data in " << ns << " in range [" << redact(min) << ", " + << redact(max) << ")"; return numDeleted; } @@ -518,14 +518,14 @@ Helpers::RemoveSaver::~RemoveSaver() { Status status = _protector->finalize(protectedBuffer.get(), protectedSizeMax, &resultLen); if (!status.isOK()) { severe() << "Unable to finalize DataProtector while closing RemoveSaver: " - << status.reason(); + << redact(status); fassertFailed(34350); } _out->write(reinterpret_cast<const char*>(protectedBuffer.get()), resultLen); if (_out->fail()) { severe() << "Couldn't write finalized DataProtector data to: " << _file.string() - << " for remove saving: " << errnoWithDescription(); + << " for remove saving: " << redact(errnoWithDescription()); fassertFailed(34351); } @@ -533,7 +533,7 @@ Helpers::RemoveSaver::~RemoveSaver() { status = _protector->finalizeTag(protectedBuffer.get(), protectedSizeMax, &resultLen); if (!status.isOK()) { severe() << "Unable to get finalizeTag from DataProtector while closing RemoveSaver: " - << status.reason(); + << redact(status); fassertFailed(34352); } if (resultLen != _protector->getNumberOfBytesReservedForTag()) { @@ -546,7 +546,7 @@ Helpers::RemoveSaver::~RemoveSaver() { _out->write(reinterpret_cast<const char*>(protectedBuffer.get()), resultLen); if (_out->fail()) { severe() << "Couldn't write finalizeTag from DataProtector to: " << _file.string() - << " for remove saving: " << errnoWithDescription(); + << " for remove saving: " << redact(errnoWithDescription()); fassertFailed(34354); } } @@ -558,7 +558,7 @@ Status Helpers::RemoveSaver::goingToDelete(const BSONObj& o) { _out.reset(new ofstream(_file.string().c_str(), ios_base::out | ios_base::binary)); if (_out->fail()) { string msg = str::stream() << "couldn't create file: " << _file.string() - << " for remove saving: " << errnoWithDescription(); + << " for remove saving: " << redact(errnoWithDescription()); error() << msg; _out.reset(); _out = 0; @@ -591,7 +591,7 @@ Status Helpers::RemoveSaver::goingToDelete(const BSONObj& o) { _out->write(reinterpret_cast<const char*>(data), dataSize); if (_out->fail()) { string msg = str::stream() << "couldn't write document to file: " << _file.string() - << " for remove saving: " << errnoWithDescription(); + << " for remove saving: " << redact(errnoWithDescription()); error() << msg; return Status(ErrorCodes::OperationFailed, msg); } diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp index 738a974ea89..f1867423b78 100644 --- a/src/mongo/db/exec/cached_plan.cpp +++ b/src/mongo/db/exec/cached_plan.cpp @@ -139,9 +139,9 @@ Status 
CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj); LOG(1) << "Execution of cached plan failed, falling back to replan." - << " query: " << _canonicalQuery->toStringShort() - << " planSummary: " << Explain::getPlanSummary(child().get()) - << " status: " << statusObj; + << " query: " << redact(_canonicalQuery->toStringShort()) + << " planSummary: " << redact(Explain::getPlanSummary(child().get())) + << " status: " << redact(statusObj); const bool shouldCache = false; return replan(yieldPolicy, shouldCache); @@ -150,9 +150,9 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj); LOG(1) << "Execution of cached plan failed: PlanStage died" - << ", query: " << _canonicalQuery->toStringShort() - << " planSummary: " << Explain::getPlanSummary(child().get()) - << " status: " << statusObj; + << ", query: " << redact(_canonicalQuery->toStringShort()) + << " planSummary: " << redact(Explain::getPlanSummary(child().get())) + << " status: " << redact(statusObj); return WorkingSetCommon::getMemberObjectStatus(statusObj); } else { @@ -165,8 +165,8 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { LOG(1) << "Execution of cached plan required " << maxWorksBeforeReplan << " works, but was originally cached with only " << _decisionWorks << " works. Evicting cache entry and replanning query: " - << _canonicalQuery->toStringShort() - << " plan summary before replan: " << Explain::getPlanSummary(child().get()); + << redact(_canonicalQuery->toStringShort()) + << " plan summary before replan: " << redact(Explain::getPlanSummary(child().get())); const bool shouldCache = true; return replan(yieldPolicy, shouldCache); @@ -240,8 +240,8 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) { LOG(1) << "Replanning of query resulted in single query solution, which will not be cached. " - << _canonicalQuery->toStringShort() - << " plan summary after replan: " << Explain::getPlanSummary(child().get()) + << redact(_canonicalQuery->toStringShort()) + << " plan summary after replan: " << redact(Explain::getPlanSummary(child().get())) << " previous cache entry evicted: " << (shouldCache ? "yes" : "no"); return Status::OK(); } @@ -273,8 +273,8 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) { return pickBestPlanStatus; } - LOG(1) << "Replanning " << _canonicalQuery->toStringShort() - << " resulted in plan with summary: " << Explain::getPlanSummary(child().get()) + LOG(1) << "Replanning " << redact(_canonicalQuery->toStringShort()) + << " resulted in plan with summary: " << redact(Explain::getPlanSummary(child().get())) << ", which " << (shouldCache ? 
"has" : "has not") << " been written to the cache"; return Status::OK(); } @@ -333,9 +333,9 @@ void CachedPlanStage::updatePlanCache() { PlanCache* cache = _collection->infoCache()->getPlanCache(); Status fbs = cache->feedback(*_canonicalQuery, feedback.release()); if (!fbs.isOK()) { - LOG(5) << _canonicalQuery->ns() - << ": Failed to update cache with feedback: " << fbs.toString() << " - " - << "(query: " << _canonicalQuery->getQueryObj() + LOG(5) << _canonicalQuery->ns() << ": Failed to update cache with feedback: " << redact(fbs) + << " - " + << "(query: " << redact(_canonicalQuery->getQueryObj()) << "; sort: " << _canonicalQuery->getQueryRequest().getSort() << "; projection: " << _canonicalQuery->getQueryRequest().getProj() << ") is no longer in plan cache."; diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp index fa1974ae6d0..bbf0c316d13 100644 --- a/src/mongo/db/exec/geo_near.cpp +++ b/src/mongo/db/exec/geo_near.cpp @@ -123,12 +123,12 @@ static void extractGeometries(const BSONObj& doc, // Valid geometry element geometries->push_back(stored.release()); } else { - warning() << "geoNear stage read non-geometry element " << nextEl.toString() - << " in array " << el.toString(); + warning() << "geoNear stage read non-geometry element " << redact(nextEl) + << " in array " << redact(el); } } } else { - warning() << "geoNear stage read non-geometry element " << el.toString(); + warning() << "geoNear stage read non-geometry element " << redact(el); } } } diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp index 9a7275c12f3..28c14dbcba3 100644 --- a/src/mongo/db/exec/multi_plan.cpp +++ b/src/mongo/db/exec/multi_plan.cpp @@ -118,7 +118,7 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) { StageState state = bestPlan.root->work(out); if (PlanStage::FAILURE == state && hasBackupPlan()) { - LOG(5) << "Best plan errored out switching to backup\n"; + LOG(5) << "Best plan errored out switching to backup"; // Uncache the bad solution if we fall back // on the backup solution. 
        //
@@ -136,7 +136,7 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
    }

    if (hasBackupPlan() && PlanStage::ADVANCED == state) {
-        LOG(5) << "Best plan had a blocking stage, became unblocked\n";
+        LOG(5) << "Best plan had a blocking stage, became unblocked";
        _backupPlanIdx = kNoSuchPlan;
    }
@@ -238,15 +238,15 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
    std::list<WorkingSetID>& alreadyProduced = bestCandidate.results;
    const auto& bestSolution = bestCandidate.solution;

-    LOG(5) << "Winning solution:\n" << bestSolution->toString() << endl;
-    LOG(2) << "Winning plan: " << Explain::getPlanSummary(bestCandidate.root);
+    LOG(5) << "Winning solution:\n" << redact(bestSolution->toString());
+    LOG(2) << "Winning plan: " << redact(Explain::getPlanSummary(bestCandidate.root));

    _backupPlanIdx = kNoSuchPlan;
    if (bestSolution->hasBlockingStage && (0 == alreadyProduced.size())) {
-        LOG(5) << "Winner has blocking stage, looking for backup plan...\n";
+        LOG(5) << "Winner has blocking stage, looking for backup plan...";
        for (size_t ix = 0; ix < _candidates.size(); ++ix) {
            if (!_candidates[ix].solution->hasBlockingStage) {
-                LOG(5) << "Candidate " << ix << " is backup child\n";
+                LOG(5) << "Candidate " << ix << " is backup child";
                _backupPlanIdx = ix;
                break;
            }
@@ -275,11 +275,11 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
            size_t runnerUpIdx = ranking->candidateOrder[1];

            LOG(1) << "Winning plan tied with runner-up. Not caching."
-                   << " ns: " << _collection->ns() << " " << _query->toStringShort()
-                   << " winner score: " << ranking->scores[0]
-                   << " winner summary: " << Explain::getPlanSummary(_candidates[winnerIdx].root)
+                   << " ns: " << _collection->ns() << " " << redact(_query->toStringShort())
+                   << " winner score: " << ranking->scores[0] << " winner summary: "
+                   << redact(Explain::getPlanSummary(_candidates[winnerIdx].root))
                   << " runner-up score: " << ranking->scores[1] << " runner-up summary: "
-                   << Explain::getPlanSummary(_candidates[runnerUpIdx].root);
+                   << redact(Explain::getPlanSummary(_candidates[runnerUpIdx].root));
        }

        if (alreadyProduced.empty()) {
@@ -289,9 +289,9 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
            size_t winnerIdx = ranking->candidateOrder[0];

            LOG(1) << "Winning plan had zero results. Not caching."
-                   << " ns: " << _collection->ns() << " " << _query->toStringShort()
-                   << " winner score: " << ranking->scores[0]
-                   << " winner summary: " << Explain::getPlanSummary(_candidates[winnerIdx].root);
+                   << " ns: " << _collection->ns() << " " << redact(_query->toStringShort())
+                   << " winner score: " << ranking->scores[0] << " winner summary: "
+                   << redact(Explain::getPlanSummary(_candidates[winnerIdx].root));
        }
    }
@@ -316,7 +316,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
    for (size_t ix = 0; ix < solutions.size(); ++ix) {
        if (NULL == solutions[ix]->cacheData.get()) {
            LOG(5) << "Not caching query because this solution has no cache data: "
-                   << solutions[ix]->toString();
+                   << redact(solutions[ix]->toString());
            validSolutions = false;
            break;
        }
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 7d6deac699c..b16dd789915 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -203,7 +203,7 @@ PlanStage::StageState ProjectionStage::doWork(WorkingSetID* out) {
    // Punt to our specific projection impl.
    Status projStatus = transform(member);
    if (!projStatus.isOK()) {
-        warning() << "Couldn't execute projection, status = " << projStatus.toString() << endl;
+        warning() << "Couldn't execute projection, status = " << redact(projStatus);
        *out = WorkingSetCommon::allocateStatusMember(_ws, projStatus);
        return PlanStage::FAILURE;
    }
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 9c2f142622b..84318b02357 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -91,7 +91,7 @@ PlanStage::StageState ShardFilterStage::doWork(WorkingSetID* out) {
                                   "query planning has failed");

                // Fail loudly and cleanly in production, fatally in debug
-                error() << status.toString();
+                error() << redact(status);
                dassert(false);

                _ws->free(*out);
@@ -101,8 +101,7 @@ PlanStage::StageState ShardFilterStage::doWork(WorkingSetID* out) {
                // Skip this document with a warning - no shard key should not be possible
                // unless manually inserting data into a shard
-                warning() << "no shard key found in document " << member->obj.value().toString()
-                          << " "
+                warning() << "no shard key found in document " << redact(member->obj.value()) << " "
                          << "for shard key pattern " << _metadata->getKeyPattern() << ", "
                          << "document may have been inserted manually into shard";
            }
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index f37a5ddc4e9..13da0300c94 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -210,7 +210,7 @@ public:
        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
            error() << "Plan executor error during StageDebug command: "
                    << PlanExecutor::statestr(state)
-                    << ", stats: " << Explain::getWinningPlanStats(exec.get());
+                    << ", stats: " << redact(Explain::getWinningPlanStats(exec.get()));

            return appendCommandStatus(result,
                                       Status(ErrorCodes::OperationFailed,
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index f98f088ca9c..d7cbaed6db2 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -173,7 +173,7 @@ Status SubplanStage::planSubqueries() {
    for (size_t i = 0; i < _plannerParams.indices.size(); ++i) {
        const IndexEntry& ie = _plannerParams.indices[i];
        _indexMap[ie.name] = i;
-        LOG(5) << "Subplanner: index " << i << " is " << ie.toString();
+        LOG(5) << "Subplanner: index " << i << " is " << ie;
    }

    const ExtensionsCallbackReal extensionsCallback(getOpCtx(), &_collection->ns());
@@ -400,7 +400,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
            return Status(ErrorCodes::BadValue, ss);
        }

-        LOG(5) << "Subplanner: fully tagged tree is " << solnRoot->toString();
+        LOG(5) << "Subplanner: fully tagged tree is " << redact(solnRoot->toString());

        // Takes ownership of 'solnRoot'
        _compositeSolution.reset(
@@ -412,7 +412,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
        return Status(ErrorCodes::BadValue, ss);
    }

-    LOG(5) << "Subplanner: Composite solution is " << _compositeSolution->toString();
+    LOG(5) << "Subplanner: Composite solution is " << redact(_compositeSolution->toString());

    // Use the index tags from planning each branch to construct the composite solution,
    // and set that solution as our child stage.
diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp
index c593498e683..47ba2528328 100644
--- a/src/mongo/db/geo/r2_region_coverer.cpp
+++ b/src/mongo/db/geo/r2_region_coverer.cpp
@@ -108,7 +108,8 @@ void R2RegionCoverer::getCovering(const R2Region& region, vector<GeoHash>* cover
    while (!_candidateQueue->empty()) {
        Candidate* candidate = _candidateQueue->top().second;  // Owned
        _candidateQueue->pop();
-        LOG(3) << "Pop: " << candidate->cell;
+        // REDACT?? I think this may have User info, but I'm not sure how to redact
+        LOG(3) << "Pop: " << redact(candidate->cell.toString());

        // Try to expand this cell into its children
        if (candidate->cell.getBits() < _minLevel || candidate->numChildren == 1 ||
@@ -184,7 +185,8 @@ void R2RegionCoverer::addCandidate(Candidate* candidate) {
        int priority = -(((((int)candidate->cell.getBits() << 4) + candidate->numChildren) << 4) +
                         numTerminals);
        _candidateQueue->push(make_pair(priority, candidate));  // queue owns candidate
-        LOG(3) << "Push: " << candidate->cell << " (" << priority << ") ";
+        // REDACT??
+        LOG(3) << "Push: " << redact(candidate->cell.toString()) << " (" << priority << ") ";
    }
}
diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp
index 00edf43e3ce..d58c5ce1dc1 100644
--- a/src/mongo/db/index/expression_keys_private.cpp
+++ b/src/mongo/db/index/expression_keys_private.cpp
@@ -571,7 +571,7 @@ void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
    if (keysToAdd.size() > params.maxKeysPerInsert) {
        warning() << "Insert of geo object generated a high number of keys."
-                  << " num keys: " << keysToAdd.size() << " obj inserted: " << obj;
+                  << " num keys: " << keysToAdd.size() << " obj inserted: " << redact(obj);
    }

    *keys = keysToAdd;
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 64b699332e3..95822b21dc8 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -77,8 +77,8 @@ void HaystackAccessMethod::searchCommand(OperationContext* txn,
                                         unsigned limit) {
    Timer t;

-    LOG(1) << "SEARCH near:" << nearObj << " maxDistance:" << maxDistance << " search: " << search
-           << endl;
+    LOG(1) << "SEARCH near:" << redact(nearObj) << " maxDistance:" << maxDistance
+           << " search: " << redact(search);
    int x, y;
    {
        BSONObjIterator i(nearObj);
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 86c045c0257..b832bcdcf72 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -176,8 +176,8 @@ void IndexAccessMethod::removeOneKey(OperationContext* txn,
    try {
        _newInterface->unindex(txn, key, loc, dupsAllowed);
    } catch (AssertionException& e) {
-        log() << "Assertion failure: _unindex failed " << _descriptor->indexNamespace() << endl;
-        log() << "Assertion failure: _unindex failed: " << e.what() << " key:" << key.toString()
+        log() << "Assertion failure: _unindex failed " << _descriptor->indexNamespace();
+        log() << "Assertion failure: _unindex failed: " << redact(e) << " key:" << key.toString()
              << " dl:" << loc;
        logContext();
    }
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index 3e27759a3a9..368ebfa57e9 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -93,11 +93,4 @@ bool IndexDescriptor::areIndexOptionsEquivalent(const IndexDescriptor* other) co
                                       rhs.second);
                          });
}
-
-void IndexDescriptor::_checkOk() const {
-    if (_magic == 123987)
-        return;
-    log() << "uh oh: " << (void*)(this) << " " << _magic;
-    invariant(0);
-}
}
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index cead1965b9e..c41131d578e 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -58,8 +58,7 @@ public:
     * infoObj is a copy of the index-describing BSONObj contained in the OnDiskIndexData.
     */
    IndexDescriptor(Collection* collection, const std::string& accessMethodName, BSONObj infoObj)
-        : _magic(123987),
-          _collection(collection),
+        : _collection(collection),
          _accessMethodName(accessMethodName),
          _infoObj(infoObj.getOwned()),
          _numFields(infoObj.getObjectField("key").nFields()),
@@ -80,9 +79,6 @@ public:
        }
    }

-    ~IndexDescriptor() {
-        _magic = 555;
-    }

    //
    // Information about the key pattern.
@@ -94,7 +90,6 @@ public:
     * Example: {foo: 1, bar: -1}
     */
    const BSONObj& keyPattern() const {
-        _checkOk();
        return _keyPattern;
    }
@@ -108,7 +103,6 @@ public:

    // How many fields do we index / are in the key pattern?
    int getNumFields() const {
-        _checkOk();
        return _numFields;
    }
@@ -118,7 +112,6 @@ public:

    // Return the name of the index.
    const std::string& indexName() const {
-        _checkOk();
        return _indexName;
    }
@@ -163,17 +156,14 @@ public:

    // Is this index multikey?
    bool isMultikey(OperationContext* opCtx) const {
-        _checkOk();
        return _collection->getIndexCatalog()->isMultikey(opCtx, this);
    }

    MultikeyPaths getMultikeyPaths(OperationContext* opCtx) const {
-        _checkOk();
        return _collection->getIndexCatalog()->getMultikeyPaths(opCtx, this);
    }

    bool isIdIndex() const {
-        _checkOk();
        return _isIdIndex;
    }
@@ -193,13 +183,11 @@ public:

    // Return a (rather compact) std::string representation.
    std::string toString() const {
-        _checkOk();
        return _infoObj.toString();
    }

    // Return the info object.
    const BSONObj& infoObj() const {
-        _checkOk();
        return _infoObj;
    }
@@ -229,10 +217,6 @@ public:
    }

private:
-    void _checkOk() const;
-
-    int _magic;
-
    // Related catalog information of the parent collection
    Collection* _collection;
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index 32b94a15ca0..8ba47c1a6be 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -74,8 +74,7 @@ S2AccessMethod::S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterfac
            geoFields >= 1);

    if (descriptor->isSparse()) {
-        warning() << "Sparse option ignored for index spec " << descriptor->keyPattern().toString()
-                  << "\n";
+        warning() << "Sparse option ignored for index spec " << descriptor->keyPattern().toString();
    }
}
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index a2a467390a6..fbefb512ff2 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -101,7 +101,7 @@ void IndexBuilder::run() {
        Status status = _build(&txn, db, true, &dlk);
        if (!status.isOK()) {
-            error() << "IndexBuilder could not build index: " << status.toString();
+            error() << "IndexBuilder could not build index: " << redact(status);
            fassert(28555, ErrorCodes::isInterruption(status.code()));
        }
    }
@@ -193,7 +193,7 @@ Status IndexBuilder::_build(OperationContext* txn,
            wunit.commit();
        }
        if (!status.isOK()) {
-            error() << "bad status from index build: " << status;
+            error() << "bad status from index build: " << redact(status);
        }
    } catch (const DBException& e) {
        status = e.toStatus();
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index ed9e82a6edb..2d4c40f5747 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -127,7 +127,7 @@ void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
            indexer.commit();
            wunit.commit();
        } catch (const DBException& e) {
-            error() << "Index rebuilding did not complete: " << e.toString();
+            error() << "Index rebuilding did not complete: " << redact(e);
            log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds";
            // If anything went wrong, leave the indexes partially built so that we pick them up
            // again on restart.
@@ -164,9 +164,9 @@ void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
        }
        checkNS(txn, collNames);
    } catch (const DBException& e) {
-        error() << "Index verification did not complete: " << e.toString();
+        error() << "Index verification did not complete: " << redact(e);
        fassertFailedNoTrace(18643);
    }
-    LOG(1) << "checking complete" << endl;
+    LOG(1) << "checking complete";
}
}
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index acd16f3577d..c6d0983e1d0 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -289,7 +289,7 @@ MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
            new MessageEventDetailsEncoder, writer.getValue())));

        if (serverGlobalParams.logAppend && exists) {
-            log() << "***** SERVER RESTARTED *****" << endl;
+            log() << "***** SERVER RESTARTED *****";

            Status status = logger::RotatableFileWriter::Use(writer.getValue()).status();
            if (!status.isOK())
                return status;
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 8f9ec0c4318..6a9b909a827 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -392,7 +392,7 @@ void receivedKillCursors(OperationContext* txn, Message& m) {
    uassert(13004, str::stream() << "sent negative cursors to kill: " << n, n >= 1);

    if (n > 2000) {
-        (n < 30000 ? warning() : error()) << "receivedKillCursors, n=" << n << endl;
+        (n < 30000 ? warning() : error()) << "receivedKillCursors, n=" << n;
        verify(n < 30000);
    }
@@ -401,7 +401,7 @@ void receivedKillCursors(OperationContext* txn, Message& m) {
    int found = CursorManager::eraseCursorGlobalIfAuthorized(txn, n, cursorArray);

    if (shouldLog(logger::LogSeverity::Debug(1)) || found != n) {
-        LOG(found == n ? 1 : 0) << "killcursors: found " << found << " of " << n << endl;
+        LOG(found == n ? 1 : 0) << "killcursors: found " << found << " of " << n;
    }
}
@@ -531,7 +531,7 @@ void (*reportEventToSystem)(const char* msg) = 0;
void mongoAbort(const char* msg) {
    if (reportEventToSystem)
        reportEventToSystem(msg);
-    severe() << msg;
+    severe() << redact(msg);
    ::abort();
}
@@ -624,7 +624,7 @@ void assembleResponse(OperationContext* txn,
            int len = strlen(p);
            if (len > 400)
-                log() << curTimeMillis64() % 10000 << " long msg received, len:" << len << endl;
+                log() << curTimeMillis64() % 10000 << " long msg received, len:" << len;

            if (strcmp("end", p) == 0)
                dbresponse.response.setData(opReply, "dbMsg end no longer supported");
@@ -640,7 +640,7 @@ void assembleResponse(OperationContext* txn,
            logThreshold = 10;
            receivedKillCursors(txn, m);
        } else if (op != dbInsert && op != dbUpdate && op != dbDelete) {
-            log() << " operation isn't supported: " << static_cast<int>(op) << endl;
+            log() << " operation isn't supported: " << static_cast<int>(op);
            currentOp.done();
            shouldLogOpDebug = true;
        } else {
@@ -672,12 +672,12 @@ void assembleResponse(OperationContext* txn,
        } catch (const UserException& ue) {
            LastError::get(c).setLastError(ue.getCode(), ue.getInfo().msg);
            LOG(3) << " Caught Assertion in " << networkOpToString(op) << ", continuing "
-                   << ue.toString() << endl;
+                   << redact(ue);
            debug.exceptionInfo = ue.getInfo();
        } catch (const AssertionException& e) {
            LastError::get(c).setLastError(e.getCode(), e.getInfo().msg);
            LOG(3) << " Caught Assertion in " << networkOpToString(op) << ", continuing "
-                   << e.toString() << endl;
+                   << redact(e);
            debug.exceptionInfo = e.getInfo();
            shouldLogOpDebug = true;
        }
@@ -694,8 +694,7 @@ void assembleResponse(OperationContext* txn,
    if (shouldLogOpDebug || debug.executionTime > logThreshold) {
        Locker::LockerInfo lockerInfo;
        txn->lockState()->getLockerInfo(&lockerInfo);
-
-        log() << debug.report(&c, currentOp, lockerInfo.stats);
+        log() << redact(debug.report(&c, currentOp, lockerInfo.stats));
    }

    if (currentOp.shouldDBProfile(debug.executionTime)) {
@@ -729,14 +728,14 @@ void DiagLog::openFile() {
        log() << msg.ss.str();
        uasserted(ErrorCodes::FileStreamFailed, msg.ss.str());
    } else {
-        log() << "diagLogging using file " << name << endl;
+        log() << "diagLogging using file " << name;
    }
}

int DiagLog::setLevel(int newLevel) {
    stdx::lock_guard<stdx::mutex> lk(mutex);
    int old = level;
-    log() << "diagLogging level=" << newLevel << endl;
+    log() << "diagLogging level=" << newLevel;

    if (f == 0) {
        openFile();
    }
@@ -746,7 +745,7 @@ int DiagLog::setLevel(int newLevel) {

void DiagLog::flush() {
    if (level) {
-        log() << "flushing diag log" << endl;
+        log() << "flushing diag log";
        stdx::lock_guard<stdx::mutex> lk(mutex);
        f->flush();
    }
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 3bc23d3fa93..cd802146305 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -161,8 +161,7 @@ void profile(OperationContext* txn, NetworkOp op) {
        }
    } catch (const AssertionException& assertionEx) {
        warning() << "Caught Assertion while trying to profile " << networkOpToString(op)
-                  << " against " << CurOp::get(txn)->getNS() << ": " << assertionEx.toString()
-                  << endl;
+                  << " against " << CurOp::get(txn)->getNS() << ": " << redact(assertionEx);
    }
}
@@ -183,7 +182,7 @@ Status createProfileCollection(OperationContext* txn, Database* db) {
    }

    // system.profile namespace doesn't exist; create it
-    log() << "Creating profile collection: " << dbProfilingNS << endl;
+    log() << "Creating profile collection: " << dbProfilingNS;

    CollectionOptions collectionOptions;
    collectionOptions.capped = true;
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 572ddf4d716..a1d6315bfe1 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -87,7 +87,7 @@ void prefetchIndexPages(OperationContext* txn,
                invariant(iam);
                iam->touch(txn, obj);
            } catch (const DBException& e) {
-                LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
+                LOG(2) << "ignoring exception in prefetchIndexPages(): " << redact(e);
            }
            break;
        }
@@ -105,7 +105,7 @@ void prefetchIndexPages(OperationContext* txn,
                    verify(iam);
                    iam->touch(txn, obj);
                } catch (const DBException& e) {
-                    LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
+                    LOG(2) << "ignoring exception in prefetchIndexPages(): " << redact(e);
                }
            }
            break;
@@ -136,7 +136,7 @@ void prefetchRecordPages(OperationContext* txn, Database* db, const char* ns, co
            _dummy_char += *(result.objdata() + result.objsize() - 1);
        }
    } catch (const DBException& e) {
-        LOG(2) << "ignoring exception in prefetchRecordPages(): " << e.what() << endl;
+        LOG(2) << "ignoring exception in prefetchRecordPages(): " << redact(e);
    }
}
@@ -175,7 +175,7 @@ void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSO
        return;
    }

-    LOG(4) << "index prefetch for op " << *opType << endl;
+    LOG(4) << "index prefetch for op " << *opType;

    // should we prefetch index pages on updates? if the update is in-place and doesn't change
    // indexed values, it is actually slower - a lot slower if there are a dozen indexes or
@@ -248,7 +248,7 @@ public:
    }

    virtual Status setFromString(const string& prefetch) {
-        log() << "changing replication index prefetch behavior to " << prefetch << endl;
+        log() << "changing replication index prefetch behavior to " << prefetch;

        ReplSettings::IndexPrefetchConfig prefetchConfig;
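The other recurring edit in this diff, dropping << endl, works because MongoDB's log streams emit a complete, newline-terminated message when the temporary stream object is destroyed at the end of the statement, so an explicit endl is redundant. Below is a minimal sketch of that pattern using a simplified, hypothetical LogStream stand-in (compile with -std=c++17), not the server's actual LogstreamBuilder from src/mongo/util/log.h.

// Sketch of a message-oriented log stream that makes "<< endl" unnecessary.
#include <iostream>
#include <sstream>

class LogStream {
public:
    template <typename T>
    LogStream& operator<<(const T& value) {
        _buffer << value;  // accumulate the message
        return *this;
    }

    // The destructor flushes the buffered message as one newline-terminated
    // line, so callers never stream endl themselves.
    ~LogStream() {
        std::cout << _buffer.str() << '\n';
    }

private:
    std::ostringstream _buffer;
};

// Factory mirroring the log() call sites in the diff (C++17 copy elision
// constructs the temporary directly at the call site).
LogStream log() {
    return LogStream();
}

int main() {
    log() << "CMD fsync: sync:" << 1 << " lock:" << 0;  // newline added automatically
    return 0;
}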