author     Ramon Fernandez <ramon@mongodb.com>  2016-08-27 18:10:20 -0400
committer  Ramon Fernandez <ramon@mongodb.com>  2016-08-29 18:59:24 -0400
commit     ba5960835a055a0638c1123c47cad984e315682a (patch)
tree       149563abec633eaa0e057fd127b6dad4128f53d5 /src/mongo
parent     de66470b9e14717c829e07010759a89437946f5b (diff)
download   mongo-ba5960835a055a0638c1123c47cad984e315682a.tar.gz
SERVER-24991 log redaction for sharding, repl files
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/range_deleter.cpp | 5
-rw-r--r--  src/mongo/db/range_deleter_db_env.cpp | 6
-rw-r--r--  src/mongo/db/repair_database.cpp | 2
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 18
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp | 4
-rw-r--r--  src/mongo/db/repl/data_replicator.cpp | 26
-rw-r--r--  src/mongo/db/repl/database_cloner.cpp | 5
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 38
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 18
-rw-r--r--  src/mongo/db/repl/oplog_fetcher.cpp | 2
-rw-r--r--  src/mongo/db/repl/oplogreader.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 4
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 40
-rw-r--r--  src/mongo/db/repl/rs_sync.cpp | 2
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp | 35
-rw-r--r--  src/mongo/db/repl/task_runner.cpp | 4
17 files changed, 113 insertions, 100 deletions
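
The change applied across the files below is mechanical: every value streamed into a log line that may carry user data (BSON documents, query bounds, status and exception messages) is wrapped in redact(), so the value can be masked when log redaction is enabled. The stand-alone C++ sketch below illustrates that idea only; the logRedactionEnabled flag, the "###" placeholder, and the demo namespace are assumptions made for this example, not MongoDB's actual redact() implementation.

// Illustrative sketch of a redact()-style helper. Not MongoDB's implementation;
// the flag, placeholder, and namespace are assumptions for this example only.
#include <iostream>
#include <sstream>
#include <string>

namespace demo {

// Stands in for a server-wide "redact client data in logs" setting.
bool logRedactionEnabled = true;

// Return the value rendered as text, or a fixed placeholder when redaction is on.
template <typename T>
std::string redact(const T& value) {
    if (logRedactionEnabled) {
        return "###";
    }
    std::ostringstream os;
    os << value;
    return os.str();
}

}  // namespace demo

int main() {
    std::string rangeBound = "{ ssn: \"123-45-6789\" }";
    // With redaction enabled, the sensitive bound never reaches the log output.
    std::cout << "waiting for open cursors before removing range ["
              << demo::redact(rangeBound) << ")" << std::endl;
    return 0;
}

With the flag off, the original value is printed unchanged, which mirrors how the wrapped log statements in the hunks below behave when redaction is disabled.
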
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index be280d8aa4a..6f574293e21 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -116,7 +116,8 @@ static void logCursorsWaiting(RangeDeleteEntry* entry) {
}
log() << "waiting for open cursors before removing range "
- << "[" << entry->options.range.minKey << ", " << entry->options.range.maxKey << ") "
+ << "[" << redact(entry->options.range.minKey) << ", "
+ << redact(entry->options.range.maxKey) << ") "
<< "in " << entry->options.range.ns
<< (entry->lastLoggedTS == Date_t()
? string("")
@@ -493,7 +494,7 @@ void RangeDeleter::doWork() {
nextTask->stats.waitForReplEndTS = jsTime();
} else {
- warning() << "Error encountered while trying to delete range: " << errMsg << endl;
+ warning() << "Error encountered while trying to delete range: " << redact(errMsg);
}
}
diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp
index 4132dfde863..e064de33599 100644
--- a/src/mongo/db/range_deleter_db_env.cpp
+++ b/src/mongo/db/range_deleter_db_env.cpp
@@ -83,8 +83,8 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
// log the opId so the user can use it to cancel the delete using killOp.
unsigned int opId = txn->getOpID();
- log() << "Deleter starting delete for: " << ns << " from " << inclusiveLower << " -> "
- << exclusiveUpper << ", with opId: " << opId;
+ log() << "Deleter starting delete for: " << ns << " from " << redact(inclusiveLower) << " -> "
+ << redact(exclusiveUpper) << ", with opId: " << opId;
try {
*deletedDocs =
@@ -104,7 +104,7 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
}
log() << "rangeDeleter deleted " << *deletedDocs << " documents for " << ns << " from "
- << inclusiveLower << " -> " << exclusiveUpper;
+ << redact(inclusiveLower) << " -> " << redact(exclusiveUpper);
} catch (const DBException& ex) {
*errMsg = str::stream() << "Error encountered while deleting range: "
<< "ns" << ns << " from " << inclusiveLower << " -> "
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 8e726434f7c..f34050db044 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -140,7 +140,7 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
Status status = validateBSON(data.data(), data.size());
if (!status.isOK()) {
- log() << "Invalid BSON detected at " << id << ": " << status << ". Deleting.";
+ log() << "Invalid BSON detected at " << id << ": " << redact(status) << ". Deleting.";
cursor->save(); // 'data' is no longer valid.
{
WriteUnitOfWork wunit(txn);
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 42ec9938cdf..893efc14a94 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -184,12 +184,13 @@ void BackgroundSync::_run() {
try {
_runProducer();
} catch (const DBException& e) {
- std::string msg(str::stream() << "sync producer problem: " << e.toString());
+ std::string msg(str::stream() << "sync producer problem: " << redact(e));
error() << msg;
_replCoord->setMyHeartbeatMessage(msg);
sleepmillis(100); // sleep a bit to keep from hammering this thread with temp. errors.
} catch (const std::exception& e2) {
- severe() << "sync producer exception: " << e2.what();
+ // redact(std::exception&) doesn't work
+ severe() << "sync producer exception: " << redact(e2.what());
fassertFailed(28546);
}
}
@@ -402,7 +403,7 @@ void BackgroundSync::_produce(OperationContext* txn) {
// This is bad because it means that our source
// has not returned oplog entries in ascending ts order, and they need to be.
- warning() << fetcherReturnStatus.toString();
+ warning() << redact(fetcherReturnStatus);
// Do not blacklist the server here, it will be blacklisted when we try to reuse it,
// if it can't return a matching oplog start from the last fetch oplog ts field.
return;
@@ -432,7 +433,7 @@ void BackgroundSync::_produce(OperationContext* txn) {
lastOpTimeFetched = _lastOpTimeFetched;
}
- log() << "Starting rollback due to " << fetcherReturnStatus;
+ log() << "Starting rollback due to " << redact(fetcherReturnStatus);
// Wait till all buffered oplog entries have drained and been applied.
auto lastApplied = _replCoord->getMyLastAppliedOpTime();
@@ -467,7 +468,7 @@ void BackgroundSync::_produce(OperationContext* txn) {
<< source << " for " << blacklistDuration << ".";
_replCoord->blacklistSyncSource(source, Date_t::now() + blacklistDuration);
} else if (!fetcherReturnStatus.isOK()) {
- warning() << "Fetcher error querying oplog: " << fetcherReturnStatus.toString();
+ warning() << "Fetcher error querying oplog: " << redact(fetcherReturnStatus);
}
}
@@ -597,7 +598,7 @@ void BackgroundSync::_rollback(OperationContext* txn,
if (ErrorCodes::UnrecoverableRollbackError == status.code()) {
fassertNoTrace(28723, status);
}
- warning() << "rollback cannot proceed at this time (retrying later): " << status;
+ warning() << "rollback cannot proceed at this time (retrying later): " << redact(status);
}
HostAndPort BackgroundSync::getSyncTarget() const {
@@ -672,14 +673,15 @@ long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "readLastAppliedHash", rsOplogName);
} catch (const DBException& ex) {
- severe() << "Problem reading " << rsOplogName << ": " << ex.toStatus();
+ severe() << "Problem reading " << rsOplogName << ": " << redact(ex);
fassertFailed(18904);
}
long long hash;
auto status = bsonExtractIntegerField(oplogEntry, kHashFieldName, &hash);
if (!status.isOK()) {
severe() << "Most recent entry in " << rsOplogName << " is missing or has invalid \""
- << kHashFieldName << "\" field. Oplog entry: " << oplogEntry << ": " << status;
+ << kHashFieldName << "\" field. Oplog entry: " << redact(oplogEntry) << ": "
+ << redact(status);
fassertFailed(18902);
}
return hash;
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index fa85b2b2fcc..83eed5ca3a7 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -401,7 +401,7 @@ void CollectionCloner::_insertDocumentsCallback(const executor::TaskExecutor::Ca
}
void CollectionCloner::_finishCallback(const Status& status) {
- LOG(1) << "CollectionCloner ns:" << _destNss << " finished with status: " << status;
+ LOG(1) << "CollectionCloner ns:" << _destNss << " finished with status: " << redact(status);
// Copy the status so we can change it below if needed.
auto finalStatus = status;
bool callCollectionLoader = false;
@@ -413,7 +413,7 @@ void CollectionCloner::_finishCallback(const Status& status) {
const auto loaderStatus = _collLoader->commit();
if (!loaderStatus.isOK()) {
warning() << "Failed to commit changes to collection " << _destNss.ns() << ": "
- << loaderStatus;
+ << redact(loaderStatus);
finalStatus = loaderStatus;
}
}
diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp
index e6e962ba3da..6e29b2f3e1a 100644
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
@@ -180,7 +180,7 @@ StatusWith<BSONObj> getLatestOplogEntry(executor::TaskExecutor* exec,
// wait for fetcher to get the oplog position.
fetcher.join();
if (statusToReturn.isOK()) {
- LOG(2) << "returning last oplog entry: " << entry << ", from: " << source
+ LOG(2) << "returning last oplog entry: " << redact(entry) << ", from: " << source
<< ", ns: " << oplogNS;
return entry;
}
@@ -486,6 +486,7 @@ Status DataReplicator::_runInitialSyncAttempt_inlock(OperationContext* txn,
if (!cd.status.isOK()) {
error() << "Error while being called to drop/create oplog and drop users "
<< "databases, oplogNS: " << _opts.localOplogNS
+ // REDACT cd??
<< " with status:" << cd.status.toString();
statusFromWrites = cd.status;
return;
@@ -814,13 +815,13 @@ StatusWith<OpTimeWithHash> DataReplicator::doInitialSync(OperationContext* txn,
}
void DataReplicator::_onDataClonerFinish(const Status& status, HostAndPort syncSource) {
- log() << "data clone finished, status: " << status.toString();
+ log() << "data clone finished, status: " << redact(status);
if (status.code() == ErrorCodes::CallbackCanceled) {
return;
}
if (!status.isOK()) {
// Initial sync failed during cloning of databases
- error() << "Failed to clone data due to '" << status << "'";
+ error() << "Failed to clone data due to '" << redact(status) << "'";
_initialSyncState->status = status;
_exec->signalEvent(_initialSyncState->finishEvent);
return;
@@ -1081,7 +1082,7 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() {
if (entry.getVersion() != OplogEntry::kOplogVersion) {
std::string message = str::stream()
<< "expected oplog version " << OplogEntry::kOplogVersion << " but found version "
- << entry.getVersion() << " in oplog entry: " << entry.raw;
+ << entry.getVersion() << " in oplog entry: " << redact(entry.raw);
severe() << message;
return {ErrorCodes::BadValue, message};
}
@@ -1131,7 +1132,7 @@ void DataReplicator::_onApplyBatchFinish(const Status& status,
if (!status.isOK()) {
switch (_state) {
case DataReplicatorState::InitialSync:
- error() << "Failed to apply batch due to '" << status << "'";
+ error() << "Failed to apply batch due to '" << redact(status) << "'";
_initialSyncState->status = status;
_exec->signalEvent(_initialSyncState->finishEvent);
return;
@@ -1193,7 +1194,7 @@ Status DataReplicator::_scheduleApplyBatch_inlock() {
auto batchStatus = _getNextApplierBatch_inlock();
if (!batchStatus.isOK()) {
- warning() << "Failure creating next apply batch: " << batchStatus.getStatus();
+ warning() << "Failure creating next apply batch: " << redact(batchStatus.getStatus());
return batchStatus.getStatus();
}
const Operations& ops = batchStatus.getValue();
@@ -1423,7 +1424,7 @@ void DataReplicator::_onOplogFetchFinish(const Status& status, const OpTimeWithH
invariant(!status.isOK());
if (_state == DataReplicatorState::InitialSync) {
// Do not change sync source, just log.
- error() << "Error fetching oplog during initial sync: " << status;
+ error() << "Error fetching oplog during initial sync: " << redact(status);
LockGuard lk(_mutex);
invariant(_initialSyncState);
_initialSyncState->status = status;
@@ -1459,8 +1460,9 @@ void DataReplicator::_onOplogFetchFinish(const Status& status, const OpTimeWithH
syncSource = _syncSource;
_syncSource = HostAndPort();
}
- log() << "Blacklisting " << syncSource << " due to fetcher error: '" << status
- << "' for " << _opts.blacklistSyncSourcePenaltyForNetworkConnectionError
+ log() << "Blacklisting " << syncSource << " due to fetcher error: '"
+ << redact(status) << "' for "
+ << _opts.blacklistSyncSourcePenaltyForNetworkConnectionError
<< " until: " << until;
_opts.syncSourceSelector->blacklistSyncSource(syncSource, until);
}
@@ -1480,7 +1482,7 @@ void DataReplicator::_rollbackOperations(const CallbackArgs& cbData) {
HostAndPort syncSource = getSyncSource();
auto rollbackStatus = _opts.rollbackFn(makeOpCtx().get(), lastOpTimeWritten.opTime, syncSource);
if (!rollbackStatus.isOK()) {
- error() << "Failed rollback: " << rollbackStatus;
+ error() << "Failed rollback: " << redact(rollbackStatus);
Date_t until{_exec->now() + _opts.blacklistSyncSourcePenaltyForOplogStartMissing};
HostAndPort syncSource;
{
@@ -1491,8 +1493,8 @@ void DataReplicator::_rollbackOperations(const CallbackArgs& cbData) {
_fetcherPaused = false;
}
log() << "Blacklisting host: " << syncSource << " during rollback due to error: '"
- << rollbackStatus << "' for " << _opts.blacklistSyncSourcePenaltyForOplogStartMissing
- << " until: " << until;
+ << redact(rollbackStatus) << "' for "
+ << _opts.blacklistSyncSourcePenaltyForOplogStartMissing << " until: " << until;
_opts.syncSourceSelector->blacklistSyncSource(syncSource, until);
} else {
// Go back to steady sync after a successful rollback.
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index baf4eeb4fce..a7b8bda03cc 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -353,7 +353,7 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter);
if (!startStatus.isOK()) {
LOG(1) << " failed to start collection cloning on "
- << _currentCollectionClonerIter->getSourceNamespace() << ": " << startStatus;
+ << _currentCollectionClonerIter->getSourceNamespace() << ": " << redact(startStatus);
_finishCallback_inlock(lk, startStatus);
return;
}
@@ -385,7 +385,8 @@ void DatabaseCloner::_collectionClonerCallback(const Status& status, const Names
Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter);
if (!startStatus.isOK()) {
LOG(1) << " failed to start collection cloning on "
- << _currentCollectionClonerIter->getSourceNamespace() << ": " << startStatus;
+ << _currentCollectionClonerIter->getSourceNamespace() << ": "
+ << redact(startStatus);
_finishCallback_inlock(lk, startStatus);
return;
}
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 005427d0ba4..353e81f2e60 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -515,7 +515,7 @@ void ReplSource::resync(OperationContext* txn, const std::string& dbName) {
return;
} else {
log() << "resync of " << db << " from " << hostName
- << " failed due to: " << status.toString();
+ << " failed due to: " << redact(status);
throw SyncException();
}
}
@@ -651,10 +651,12 @@ void ReplSource::applyCommand(OperationContext* txn, const BSONObj& op) {
}
}
} catch (UserException& e) {
- log() << "sync: caught user assertion " << e << " while applying op: " << op << endl;
+ log() << "sync: caught user assertion " << redact(e) << " while applying op: " << redact(op)
+ << endl;
;
} catch (DBException& e) {
- log() << "sync: caught db exception " << e << " while applying op: " << op << endl;
+ log() << "sync: caught db exception " << redact(e) << " while applying op: " << redact(op)
+ << endl;
;
}
}
@@ -672,10 +674,12 @@ void ReplSource::applyOperation(OperationContext* txn, Database* db, const BSONO
}
}
} catch (UserException& e) {
- log() << "sync: caught user assertion " << e << " while applying op: " << op << endl;
+ log() << "sync: caught user assertion " << redact(e) << " while applying op: " << redact(op)
+ << endl;
;
} catch (DBException& e) {
- log() << "sync: caught db exception " << e << " while applying op: " << op << endl;
+ log() << "sync: caught db exception " << redact(e) << " while applying op: " << redact(op)
+ << endl;
;
}
}
@@ -690,7 +694,7 @@ void ReplSource::applyOperation(OperationContext* txn, Database* db, const BSONO
void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
BSONObj& op,
bool alreadyLocked) {
- LOG(6) << "processing op: " << op << endl;
+ LOG(6) << "processing op: " << redact(op) << endl;
if (op.getStringField("op")[0] == 'n')
return;
@@ -700,11 +704,11 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
nsToDatabase(ns, dbName);
if (*ns == '.') {
- log() << "skipping bad op in oplog: " << op.toString() << endl;
+ log() << "skipping bad op in oplog: " << redact(op) << endl;
return;
} else if (*ns == 0) {
/*if( op.getStringField("op")[0] != 'n' )*/ {
- log() << "halting replication, bad op in oplog:\n " << op.toString() << endl;
+ log() << "halting replication, bad op in oplog:\n " << redact(op) << endl;
replAllDead = "bad object in oplog";
throw SyncException();
}
@@ -949,7 +953,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
return restartSync; // don't sleep;
}
default: {
- error() << status;
+ error() << redact(status);
return forceReconnect; // causes reconnect.
}
}
@@ -983,11 +987,11 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
log() << "trying to slave off of a non-master" << '\n';
massert(13344, "trying to slave off of a non-master", false);
} else {
- error() << "$err reading remote oplog: " + err << '\n';
+ error() << "$err reading remote oplog: " + redact(err) << '\n';
massert(10390, "got $err reading remote oplog", false);
}
} else {
- error() << "bad object read from remote oplog: " << op.toString() << '\n';
+ error() << "bad object read from remote oplog: " << redact(op) << '\n';
massert(10391, "bad object read from remote oplog", false);
}
}
@@ -1067,7 +1071,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
BSONElement ts = op.getField("ts");
if (!(ts.type() == Date || ts.type() == bsonTimestamp)) {
log() << "sync error: problem querying remote oplog record" << endl;
- log() << "op: " << op.toString() << endl;
+ log() << "op: " << redact(op) << endl;
log() << "halting replication" << endl;
replInfo = replAllDead = "sync error: no ts found querying remote oplog record";
throw SyncException();
@@ -1213,17 +1217,17 @@ int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nAp
return 10;
} catch (AssertionException& e) {
if (e.severe()) {
- log() << "replMain AssertionException " << e.what() << endl;
+ log() << "replMain AssertionException " << redact(e) << endl;
return 60;
} else {
- log() << "AssertionException " << e.what() << endl;
+ log() << "AssertionException " << redact(e) << endl;
}
replInfo = "replMain caught AssertionException";
} catch (const DBException& e) {
- log() << "DBException " << e.what() << endl;
+ log() << "DBException " << redact(e) << endl;
replInfo = "replMain caught DBException";
} catch (const std::exception& e) {
- log() << "std::exception " << e.what() << endl;
+ log() << "std::exception " << redact(e.what()) << endl;
replInfo = "replMain caught std::exception";
} catch (...) {
log() << "unexpected exception during replication. replication will halt" << endl;
@@ -1423,7 +1427,7 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
}
} catch (DBException& e) {
log() << "ignoring assertion in pretouchN() " << a << ' ' << b << ' ' << i << ' '
- << e.toString() << endl;
+ << redact(e) << endl;
}
}
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 92f4137198d..40fd4fe366a 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -314,7 +314,7 @@ void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) {
const auto tsElem = entry["ts"];
if (count == 1) {
if (tsElem.eoo())
- LOG(2) << "Oplog tail entry: " << entry;
+ LOG(2) << "Oplog tail entry: " << redact(entry);
else
LOG(2) << "Oplog tail entry ts field: " << tsElem;
}
@@ -653,7 +653,7 @@ Status applyOperation_inlock(OperationContext* txn,
const BSONObj& op,
bool convertUpdateToUpsert,
IncrementOpsAppliedStatsFn incrementOpsAppliedStats) {
- LOG(3) << "applying op: " << op;
+ LOG(3) << "applying op: " << redact(op);
OpCounters* opCounters = txn->writesAreReplicated() ? &globalOpCounters : &replOpCounters;
@@ -871,7 +871,7 @@ Status applyOperation_inlock(OperationContext* txn,
if (ur.modifiers) {
if (updateCriteria.nFields() == 1) {
// was a simple { _id : ... } update criteria
- string msg = str::stream() << "failed to apply update: " << op.toString();
+ string msg = str::stream() << "failed to apply update: " << redact(op);
error() << msg;
return Status(ErrorCodes::OperationFailed, msg);
}
@@ -887,7 +887,7 @@ Status applyOperation_inlock(OperationContext* txn,
// capped collections won't have an _id index
(!indexCatalog->haveIdIndex(txn) &&
Helpers::findOne(txn, collection, updateCriteria, false).isNull())) {
- string msg = str::stream() << "couldn't find doc: " << op.toString();
+ string msg = str::stream() << "couldn't find doc: " << redact(op);
error() << msg;
return Status(ErrorCodes::OperationFailed, msg);
}
@@ -899,7 +899,7 @@ Status applyOperation_inlock(OperationContext* txn,
// (because we are idempotent),
// if an regular non-mod update fails the item is (presumably) missing.
if (!upsert) {
- string msg = str::stream() << "update of non-mod failed: " << op.toString();
+ string msg = str::stream() << "update of non-mod failed: " << redact(op);
error() << msg;
return Status(ErrorCodes::OperationFailed, msg);
}
@@ -1024,12 +1024,12 @@ Status applyCommand_inlock(OperationContext* txn, const BSONObj& op) {
}
default:
if (_oplogCollectionName == masterSlaveOplogName) {
- error() << "Failed command " << o << " on " << nss.db() << " with status "
- << status << " during oplog application";
+ error() << "Failed command " << redact(o) << " on " << nss.db()
+ << " with status " << status << " during oplog application";
} else if (curOpToApply.acceptableErrors.find(status.code()) ==
curOpToApply.acceptableErrors.end()) {
- error() << "Failed command " << o << " on " << nss.db() << " with status "
- << status << " during oplog application";
+ error() << "Failed command " << redact(o) << " on " << nss.db()
+ << " with status " << status << " during oplog application";
return status;
}
// fallthrough
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 2641de8178d..f6f1e422718 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -319,7 +319,7 @@ void OplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
// if target cut connections between connecting and querying (for
// example, because it stepped down) we might not have a cursor
if (!result.isOK()) {
- LOG(2) << "Error returned from oplog query: " << result.getStatus();
+ LOG(2) << "Error returned from oplog query: " << redact(result.getStatus());
_onShutdown(result.getStatus());
return;
}
diff --git a/src/mongo/db/repl/oplogreader.cpp b/src/mongo/db/repl/oplogreader.cpp
index 6ab73a44b01..edc9e63dba2 100644
--- a/src/mongo/db/repl/oplogreader.cpp
+++ b/src/mongo/db/repl/oplogreader.cpp
@@ -109,7 +109,7 @@ void OplogReader::query(
void OplogReader::tailingQuery(const char* ns, const BSONObj& query) {
verify(!haveCursor());
- LOG(2) << ns << ".find(" << query.toString() << ')' << endl;
+ LOG(2) << ns << ".find(" << redact(query) << ')' << endl;
cursor.reset(_conn->query(ns, query, 0, 0, nullptr, _tailingQueryOptions).release());
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 990b5fcf554..5c93123bdd4 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -144,7 +144,7 @@ bool _initialSyncClone(OperationContext* txn,
Status status = cloner.copyDb(txn, db, host, options, nullptr, collections);
if (!status.isOK()) {
log() << "initial sync: error while " << (dataPass ? "cloning " : "indexing ") << db
- << ". " << status.toString();
+ << ". " << redact(status);
return false;
}
@@ -532,7 +532,7 @@ void syncDoInitialSync(ReplicationCoordinatorExternalState* replicationCoordinat
error() << status;
}
} catch (const DBException& e) {
- error() << e;
+ error() << redact(e);
// Return if in shutdown
if (inShutdown()) {
return;
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index c03251f499d..fa69436ba33 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -178,13 +178,13 @@ Status refetch(FixUpInfo& fixUpInfo, const BSONObj& ourObj) {
doc.ownedObj = ourObj.getOwned();
doc.ns = doc.ownedObj.getStringField("ns");
if (*doc.ns == '\0') {
- warning() << "ignoring op on rollback no ns TODO : " << doc.ownedObj.toString();
+ warning() << "ignoring op on rollback no ns TODO : " << redact(doc.ownedObj);
return Status::OK();
}
BSONObj obj = doc.ownedObj.getObjectField(*op == 'u' ? "o2" : "o");
if (obj.isEmpty()) {
- warning() << "ignoring op on rollback : " << doc.ownedObj.toString();
+ warning() << "ignoring op on rollback : " << redact(doc.ownedObj);
return Status::OK();
}
@@ -246,14 +246,14 @@ Status refetch(FixUpInfo& fixUpInfo, const BSONObj& ourObj) {
continue;
}
- severe() << "cannot rollback a collMod command: " << obj;
+ severe() << "cannot rollback a collMod command: " << redact(obj);
throw RSFatalException();
}
return Status::OK();
} else if (cmdname == "applyOps") {
if (first.type() != Array) {
std::string message = str::stream()
- << "Expected applyOps argument to be an array; found " << first.toString();
+ << "Expected applyOps argument to be an array; found " << redact(first);
severe() << message;
return Status(ErrorCodes::UnrecoverableRollbackError, message);
}
@@ -261,7 +261,7 @@ Status refetch(FixUpInfo& fixUpInfo, const BSONObj& ourObj) {
if (subopElement.type() != Object) {
std::string message = str::stream()
<< "Expected applyOps operations to be of Object type, but found "
- << subopElement.toString();
+ << redact(subopElement);
severe() << message;
return Status(ErrorCodes::UnrecoverableRollbackError, message);
}
@@ -272,7 +272,7 @@ Status refetch(FixUpInfo& fixUpInfo, const BSONObj& ourObj) {
}
return Status::OK();
} else {
- severe() << "can't rollback this command yet: " << obj.toString();
+ severe() << "can't rollback this command yet: " << redact(obj);
log() << "cmdname=" << cmdname;
throw RSFatalException();
}
@@ -282,27 +282,27 @@ Status refetch(FixUpInfo& fixUpInfo, const BSONObj& ourObj) {
if (nss.isSystemDotIndexes()) {
if (*op != 'i') {
severe() << "Unexpected operation type '" << *op << "' on system.indexes operation, "
- << "document: " << doc.ownedObj;
+ << "document: " << redact(doc.ownedObj);
throw RSFatalException();
}
string objNs;
auto status = bsonExtractStringField(obj, "ns", &objNs);
if (!status.isOK()) {
severe() << "Missing collection namespace in system.indexes operation, document: "
- << doc.ownedObj;
+ << redact(doc.ownedObj);
throw RSFatalException();
}
NamespaceString objNss(objNs);
if (!objNss.isValid()) {
severe() << "Invalid collection namespace in system.indexes operation, document: "
- << doc.ownedObj;
+ << redact(doc.ownedObj);
throw RSFatalException();
}
string indexName;
status = bsonExtractStringField(obj, "name", &indexName);
if (!status.isOK()) {
severe() << "Missing index name in system.indexes operation, document: "
- << doc.ownedObj;
+ << redact(doc.ownedObj);
throw RSFatalException();
}
using ValueType = multimap<string, string>::value_type;
@@ -319,7 +319,7 @@ Status refetch(FixUpInfo& fixUpInfo, const BSONObj& ourObj) {
doc._id = obj["_id"];
if (doc._id.eoo()) {
severe() << "cannot rollback op with no _id. ns: " << doc.ns
- << ", document: " << doc.ownedObj;
+ << ", document: " << redact(doc.ownedObj);
throw RSFatalException();
}
@@ -368,8 +368,8 @@ void syncFixUp(OperationContext* txn,
return;
}
} catch (const DBException& e) {
- LOG(1) << "rollback re-get objects: " << e.toString();
- error() << "rollback couldn't re-get ns:" << doc.ns << " _id:" << doc._id << ' '
+ LOG(1) << "rollback re-get objects: " << redact(e);
+ error() << "rollback couldn't re-get ns:" << doc.ns << " _id:" << redact(doc._id) << ' '
<< numFetched << '/' << fixUpInfo.toRefetch.size();
throw e;
}
@@ -514,7 +514,7 @@ void syncFixUp(OperationContext* txn,
err += "rbid at primary changed during resync/rollback";
}
if (!err.empty()) {
- severe() << "rolling back : " << err << ". A full resync will be necessary.";
+ severe() << "rolling back : " << redact(err) << ". A full resync will be necessary.";
// TODO: reset minvalid so that we are permanently in fatal state
// TODO: don't be fatal, but rather, get all the data first.
throw RSFatalException();
@@ -661,7 +661,7 @@ void syncFixUp(OperationContext* txn,
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
severe() << "rollback cannot write document in namespace " << doc.ns
- << " to archive file: " << status;
+ << " to archive file: " << redact(status);
throw RSFatalException();
}
} else {
@@ -713,7 +713,7 @@ void syncFixUp(OperationContext* txn,
}
} catch (const DBException& e) {
error() << "rolling back capped collection rec " << doc.ns << ' '
- << e.toString();
+ << redact(e);
}
} else {
deleteObjects(txn,
@@ -762,7 +762,7 @@ void syncFixUp(OperationContext* txn,
}
} catch (const DBException& e) {
log() << "exception in rollback ns:" << doc.ns << ' ' << pattern.toString() << ' '
- << e.toString() << " ndeletes:" << deletes;
+ << redact(e) << " ndeletes:" << deletes;
warn = true;
}
}
@@ -858,7 +858,7 @@ Status _syncRollback(OperationContext* txn,
how.commonPointOurDiskloc = res.getValue().second;
}
} catch (const RSFatalException& e) {
- error() << string(e.what());
+ error() << redact(e.what());
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream()
<< "need to rollback, but unable to determine common point between"
@@ -866,7 +866,7 @@ Status _syncRollback(OperationContext* txn,
<< e.what(),
18752);
} catch (const DBException& e) {
- warning() << "rollback 2 exception " << e.toString() << "; sleeping 1 min";
+ warning() << "rollback 2 exception " << redact(e) << "; sleeping 1 min";
sleepSecondsFn(Seconds(60));
throw;
@@ -879,7 +879,7 @@ Status _syncRollback(OperationContext* txn,
try {
syncFixUp(txn, how, rollbackSource, replCoord);
} catch (const RSFatalException& e) {
- error() << "exception during rollback: " << e.what();
+ error() << "exception during rollback: " << redact(e.what());
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream() << "exception during rollback: " << e.what(),
18753);
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index f810098b843..5a1c98574d2 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -110,7 +110,7 @@ void RSDataSync::_run() {
SyncTail(_bgsync, multiSyncApply).oplogApplication(_replCoord);
} catch (...) {
auto status = exceptionToStatus();
- severe() << "Exception thrown in RSDataSync: " << status;
+ severe() << "Exception thrown in RSDataSync: " << redact(status);
std::terminate();
}
}
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index ef0f41111bc..4d909e627fe 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -171,7 +171,7 @@ OpTime StorageInterfaceImpl::getMinValid(OperationContext* txn) const {
}
if (!opTimeStatus.isOK()) {
- severe() << "Error parsing minvalid entry: " << doc
+ severe() << "Error parsing minvalid entry: " << redact(doc)
<< ", with status:" << opTimeStatus.getStatus();
fassertFailedNoTrace(40052);
}
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 2f61adaab54..113e580450e 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -304,7 +304,7 @@ Status SyncTail::syncApply(OperationContext* txn,
// this is often a no-op
// but can't be 100% sure
if (!isNoOp) {
- error() << "skipping bad op in oplog: " << op.toString();
+ error() << "skipping bad op in oplog: " << redact(op);
}
return Status::OK();
}
@@ -385,7 +385,7 @@ Status SyncTail::syncApply(OperationContext* txn,
// unknown opType
str::stream ss;
- ss << "bad opType '" << opType << "' in oplog entry: " << op.toString();
+ ss << "bad opType '" << opType << "' in oplog entry: " << redact(op);
error() << std::string(ss);
return Status(ErrorCodes::BadValue, ss);
}
@@ -419,9 +419,9 @@ void prefetchOp(const BSONObj& op) {
prefetchPagesForReplicatedOp(&txn, db, op);
}
} catch (const DBException& e) {
- LOG(2) << "ignoring exception in prefetchOp(): " << e.what() << endl;
+ LOG(2) << "ignoring exception in prefetchOp(): " << redact(e) << endl;
} catch (const std::exception& e) {
- log() << "Unhandled std::exception in prefetchOp(): " << e.what() << endl;
+ log() << "Unhandled std::exception in prefetchOp(): " << redact(e.what()) << endl;
fassertFailed(16397);
}
}
@@ -865,7 +865,8 @@ bool SyncTail::tryPopAndWaitForMore(OperationContext* txn,
if (curVersion != OplogEntry::kOplogVersion) {
severe() << "expected oplog version " << OplogEntry::kOplogVersion
- << " but found version " << curVersion << " in oplog entry: " << entry.raw;
+ << " but found version " << curVersion
+ << " in oplog entry: " << redact(entry.raw);
fassertFailedNoTrace(18820);
}
}
@@ -961,7 +962,7 @@ BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONO
const BSONElement idElem = o.getObjectField(isUpdate ? "o2" : "o")["_id"];
if (idElem.eoo()) {
- severe() << "cannot fetch missing document without _id field: " << o.toString();
+ severe() << "cannot fetch missing document without _id field: " << redact(o);
fassertFailedNoTrace(28742);
}
@@ -974,7 +975,7 @@ BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONO
<< "sync source, attempt " << retryCount << " of " << retryMax << endl;
continue; // try again
} catch (DBException& e) {
- error() << "assertion fetching missing object: " << e.what() << endl;
+ error() << "assertion fetching missing object: " << redact(e) << endl;
throw;
}
@@ -1002,7 +1003,7 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
if (missingObj.isEmpty()) {
log() << "missing object not found on source."
" presumably deleted later in oplog";
- log() << "o2: " << o.getObjectField("o2").toString();
+ log() << "o2: " << redact(o.getObjectField("o2"));
log() << "o firstfield: " << o.getObjectField("o").firstElementFieldName();
return false;
@@ -1018,7 +1019,7 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
str::stream() << "failed to insert missing doc: " << status.toString(),
status.isOK());
- LOG(1) << "inserted missing doc: " << missingObj.toString() << endl;
+ LOG(1) << "inserted missing doc: " << redact(missingObj);
wunit.commit();
return true;
@@ -1113,7 +1114,7 @@ Status multiSyncApply_noAbort(OperationContext* txn,
} catch (const DBException& e) {
// The group insert failed, log an error and fall through to the
// application of an individual op.
- error() << "Error applying inserts in bulk " << causedBy(e)
+ error() << "Error applying inserts in bulk " << redact(e)
<< " trying first insert as a lone insert";
// Avoid quadratic run time from failed insert by not retrying until we
@@ -1128,13 +1129,13 @@ Status multiSyncApply_noAbort(OperationContext* txn,
const Status status = syncApply(txn, entry->raw, convertUpdatesToUpserts);
if (!status.isOK()) {
- severe() << "Error applying operation (" << entry->raw.toString()
- << "): " << status;
+ severe() << "Error applying operation (" << redact(entry->raw)
+ << "): " << redact(status);
return status;
}
} catch (const DBException& e) {
- severe() << "writer worker caught exception: " << causedBy(e)
- << " on: " << entry->raw.toString();
+ severe() << "writer worker caught exception: " << redact(e)
+ << " on: " << redact(entry->raw);
return e.toStatus();
}
}
@@ -1168,7 +1169,8 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn,
if (st->shouldRetry(txn, entry.raw)) {
const Status s2 = SyncTail::syncApply(txn, entry.raw, convertUpdatesToUpserts);
if (!s2.isOK()) {
- severe() << "Error applying operation (" << entry.raw << "): " << s2;
+ severe() << "Error applying operation (" << redact(entry.raw)
+ << "): " << redact(s2);
return s2;
}
}
@@ -1187,7 +1189,8 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn,
continue;
}
- severe() << "writer worker caught exception: " << causedBy(e) << " on: " << entry.raw;
+ severe() << "writer worker caught exception: " << redact(e)
+ << " on: " << redact(entry.raw);
return e.toStatus();
}
}
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index 8619db6c880..2f0bc184738 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -65,7 +65,7 @@ TaskRunner::NextAction runSingleTask(const TaskRunner::Task& task,
try {
return task(txn, status);
} catch (...) {
- log() << "Unhandled exception in task runner: " << exceptionToStatus();
+ log() << "Unhandled exception in task runner: " << redact(exceptionToStatus());
}
return TaskRunner::NextAction::kCancel;
}
@@ -232,7 +232,7 @@ Status TaskRunner::runSynchronousTask(SynchronousTask func, TaskRunner::NextActi
log() << "done running the synchronous task.";
} catch (...) {
returnStatus = exceptionToStatus();
- error() << "Exception thrown in runSynchronousTask: " << returnStatus;
+ error() << "Exception thrown in runSynchronousTask: " << redact(returnStatus);
}
}