author     Thomas Schubert <thomas.schubert@mongodb.com>    2016-08-24 16:01:07 -0400
committer  Thomas Schubert <thomas.schubert@mongodb.com>    2016-09-02 16:38:54 -0400
commit     81bc570b41f59f8a7101df13892272bae6da9857
tree       c3bf61103fbf399ca1f165afd94296da8132d31b
parent     fba18b66c7140178fd5b4efdd42d51fb9e0fc0ae
download   mongo-81bc570b41f59f8a7101df13892272bae6da9857.tar.gz
SERVER-24991 log redaction for bson, client, auth, catalog
-rw-r--r--  etc/log_redaction.audit                                    84
-rw-r--r--  src/mongo/bson/bsonobj.cpp                                  1
-rw-r--r--  src/mongo/client/dbclient_rs.cpp                           17
-rw-r--r--  src/mongo/client/fetcher.cpp                                7
-rw-r--r--  src/mongo/client/parallel.cpp                              24
-rw-r--r--  src/mongo/client/replica_set_monitor.cpp                    6
-rw-r--r--  src/mongo/db/auth/authorization_manager.cpp                 4
-rw-r--r--  src/mongo/db/auth/authorization_session.cpp                 3
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_local.cpp   16
-rw-r--r--  src/mongo/db/auth/sasl_commands.cpp                         3
-rw-r--r--  src/mongo/db/auth/user_cache_invalidator_job.cpp            7
-rw-r--r--  src/mongo/db/catalog/collection.cpp                         4
-rw-r--r--  src/mongo/db/catalog/collection_compact.cpp                 2
-rw-r--r--  src/mongo/db/catalog/database.cpp                          16
-rw-r--r--  src/mongo/db/catalog/index_catalog.cpp                     14
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry.cpp                2
-rw-r--r--  src/mongo/db/catalog/index_create.cpp                       5
-rw-r--r--  src/mongo/db/cloner.cpp                                    16
-rw-r--r--  src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp  19
19 files changed, 122 insertions, 128 deletions
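
The pattern applied throughout this commit: any value that may carry user data (documents, queries, exceptions, Status objects) is wrapped in redact() before being streamed to the log. A minimal sketch of the idea follows; it is not the server's actual implementation, and the flag name and "###" placeholder below are assumptions for illustration:

    #include <string>

    // Hypothetical stand-in for the server's global redaction setting.
    static bool gRedactLogs = false;

    // Pass the message through normally; replace it with a fixed
    // placeholder when log redaction is enabled.
    std::string redact(const std::string& msg) {
        return gRedactLogs ? "###" : msg;
    }

The real helper is evidently overloaded for Status, BSONObj, and DBException as well, which is why the hunks below can write redact(status), redact(dataObj), and causedBy(redact(dbExcep)) directly, as in:

    warning() << "could not drop temp collection '" << ns << "': " << redact(status);
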
diff --git a/etc/log_redaction.audit b/etc/log_redaction.audit
index 96c65d9e587..4039f8eb068 100644
--- a/etc/log_redaction.audit
+++ b/etc/log_redaction.audit
@@ -17,53 +17,53 @@
# - second entry: ramon has to verify client/fetcher.cpp
# - any line starting with ':' means file needs verification
#
-:thomas:src/mongo/base/secure_allocator.cpp:8
-:thomas:src/mongo/bson/bsonelement.cpp:1
-:thomas:src/mongo/bson/bsonobj.cpp:1
-:thomas:src/mongo/bson/bsonobjbuilder.cpp:2
-:thomas:src/mongo/bson/json.cpp:1
-:thomas:src/mongo/client/authenticate.cpp:1
-:thomas:src/mongo/client/connection_string_connect.cpp:3
-:thomas:src/mongo/client/connpool.cpp:4
-:thomas:src/mongo/client/dbclient.cpp:8
-:thomas:src/mongo/client/dbclient_rs.cpp:27
-:thomas:src/mongo/client/dbclientcursor.cpp:4
+20160824:thomas:src/mongo/base/secure_allocator.cpp:8
+20160824:thomas:src/mongo/bson/bsonelement.cpp:1
+20160824:thomas:src/mongo/bson/bsonobj.cpp:1
+20160824:thomas:src/mongo/bson/bsonobjbuilder.cpp:2
+20160824:thomas:src/mongo/bson/json.cpp:1
+20160824:thomas:src/mongo/client/authenticate.cpp:1
+20160824:thomas:src/mongo/client/connection_string_connect.cpp:3
+20160824:thomas:src/mongo/client/connpool.cpp:4
+20160824:thomas:src/mongo/client/dbclient.cpp:8
+20160824:thomas:src/mongo/client/dbclient_rs.cpp:27
+20160824:thomas:src/mongo/client/dbclientcursor.cpp:4
:thomas:src/mongo/client/dbclientinterface.h:1
-:thomas:src/mongo/client/fetcher.cpp:3
-:thomas:src/mongo/client/parallel.cpp:40
-:thomas:src/mongo/client/replica_set_monitor.cpp:19
-:thomas:src/mongo/client/replica_set_monitor_manager.cpp:3
-:thomas:src/mongo/client/sasl_client_authenticate_impl.cpp:2
+20160824:thomas:src/mongo/client/fetcher.cpp:3
+20160824:thomas:src/mongo/client/parallel.cpp:40
+20160824:thomas:src/mongo/client/replica_set_monitor.cpp:19
+20160824:thomas:src/mongo/client/replica_set_monitor_manager.cpp:3
+20160824:thomas:src/mongo/client/sasl_client_authenticate_impl.cpp:2
:thomas:src/mongo/client/sasl_sspi.cpp:2
-:thomas:src/mongo/db/auth/auth_index_d.cpp:3
-:thomas:src/mongo/db/auth/authorization_manager.cpp:2
-:thomas:src/mongo/db/auth/authorization_session.cpp:3
-:thomas:src/mongo/db/auth/authz_manager_external_state_local.cpp:7
-:thomas:src/mongo/db/auth/authz_session_external_state_server_common.cpp:1
-:thomas:src/mongo/db/auth/role_graph_update.cpp:1
-:thomas:src/mongo/db/auth/sasl_commands.cpp:2
-:thomas:src/mongo/db/auth/sasl_options.cpp:1
-:thomas:src/mongo/db/auth/security_key.cpp:2
-:thomas:src/mongo/db/auth/user_cache_invalidator_job.cpp:5
-:thomas:src/mongo/db/auth/user_document_parser.cpp:4
-:thomas:src/mongo/db/catalog/apply_ops.cpp:1
-:thomas:src/mongo/db/catalog/coll_mod.cpp:2
-:thomas:src/mongo/db/catalog/collection.cpp:6
+20160824:thomas:src/mongo/db/auth/auth_index_d.cpp:3
+20160824:thomas:src/mongo/db/auth/authorization_manager.cpp:2
+20160824:thomas:src/mongo/db/auth/authorization_session.cpp:3
+20160824:thomas:src/mongo/db/auth/authz_manager_external_state_local.cpp:7
+20160824:thomas:src/mongo/db/auth/authz_session_external_state_server_common.cpp:1
+20160824:thomas:src/mongo/db/auth/role_graph_update.cpp:1
+20160824:thomas:src/mongo/db/auth/sasl_commands.cpp:2
+20160824:thomas:src/mongo/db/auth/sasl_options.cpp:1
+20160824:thomas:src/mongo/db/auth/security_key.cpp:2
+20160824:thomas:src/mongo/db/auth/user_cache_invalidator_job.cpp:5
+20160824:thomas:src/mongo/db/auth/user_document_parser.cpp:4
+20160824:thomas:src/mongo/db/catalog/apply_ops.cpp:1
+20160824:thomas:src/mongo/db/catalog/coll_mod.cpp:2
+20160824:thomas:src/mongo/db/catalog/collection.cpp:6
:thomas:src/mongo/db/catalog/collection.h:2
-:thomas:src/mongo/db/catalog/collection_compact.cpp:4
-:thomas:src/mongo/db/catalog/collection_info_cache.cpp:5
-:thomas:src/mongo/db/catalog/database.cpp:19
+20160824:thomas:src/mongo/db/catalog/collection_compact.cpp:4
+20160824:thomas:src/mongo/db/catalog/collection_info_cache.cpp:5
+20160824:thomas:src/mongo/db/catalog/database.cpp:19
:thomas:src/mongo/db/catalog/database.h:1
-:thomas:src/mongo/db/catalog/database_holder.cpp:2
-:thomas:src/mongo/db/catalog/drop_collection.cpp:2
-:thomas:src/mongo/db/catalog/drop_database.cpp:2
-:thomas:src/mongo/db/catalog/drop_indexes.cpp:4
-:thomas:src/mongo/db/catalog/index_catalog.cpp:14
+20160824:thomas:src/mongo/db/catalog/database_holder.cpp:2
+20160824:thomas:src/mongo/db/catalog/drop_collection.cpp:2
+20160824:thomas:src/mongo/db/catalog/drop_database.cpp:2
+20160824:thomas:src/mongo/db/catalog/drop_indexes.cpp:4
+20160824:thomas:src/mongo/db/catalog/index_catalog.cpp:14
:thomas:src/mongo/db/catalog/index_catalog.h:1
-:thomas:src/mongo/db/catalog/index_catalog_entry.cpp:2
-:thomas:src/mongo/db/catalog/index_create.cpp:12
-:thomas:src/mongo/db/catalog/rename_collection.cpp:2
-:thomas:src/mongo/db/cloner.cpp:20
+20160824:thomas:src/mongo/db/catalog/index_catalog_entry.cpp:2
+20160824:thomas:src/mongo/db/catalog/index_create.cpp:12
+20160824:thomas:src/mongo/db/catalog/rename_collection.cpp:2
+20160824:thomas:src/mongo/db/cloner.cpp:20
20160829:thomas:src/mongo/db/commands.cpp:5
20160829:thomas:src/mongo/db/commands/authentication_commands.cpp:2
20160829:thomas:src/mongo/db/commands/clone_collection.cpp:1
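
For reference, each entry in the audit file above records one source file's redaction-review status. Per the header comments, a leading ':' marks a file still needing verification, and the hunk shows entries gaining a YYYYMMDD date prefix once reviewed; the trailing integer is presumably the count of log sites to audit in that file (an inference from the entries, not stated in the header). Schematically, with a hypothetical path:

    :thomas:src/mongo/example/file.cpp:5            not yet verified, 5 log sites
    20160824:thomas:src/mongo/example/file.cpp:5    verified by thomas on 2016-08-24
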
diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp
index c3a561b0cdf..0d965e364a5 100644
--- a/src/mongo/bson/bsonobj.cpp
+++ b/src/mongo/bson/bsonobj.cpp
@@ -473,7 +473,6 @@ void BSONObj::dump() const {
builder << i << '\t' << (0xff & ((unsigned)*p));
if (*p >= 'A' && *p <= 'z')
builder << '\t' << *p;
- builder << endl;
p++;
}
}
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index ffca94992e5..584d4c61f10 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -536,7 +536,7 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const string& ns,
} catch (const DBException& dbExcep) {
StringBuilder errMsgBuilder;
errMsgBuilder << "can't query replica set node " << _lastSlaveOkHost.toString()
- << ": " << causedBy(dbExcep);
+ << ": " << causedBy(redact(dbExcep));
lastNodeErrMsg = errMsgBuilder.str();
LOG(1) << lastNodeErrMsg << endl;
@@ -588,7 +588,7 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
} catch (const DBException& dbExcep) {
StringBuilder errMsgBuilder;
errMsgBuilder << "can't findone replica set node " << _lastSlaveOkHost.toString()
- << ": " << causedBy(dbExcep);
+ << ": " << causedBy(redact(dbExcep));
lastNodeErrMsg = errMsgBuilder.str();
LOG(1) << lastNodeErrMsg << endl;
@@ -672,7 +672,7 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
auto selectedNodeStatus = monitor->getHostOrRefresh(*readPref);
if (!selectedNodeStatus.isOK()) {
LOG(3) << "dbclient_rs no compatible node found"
- << causedBy(selectedNodeStatus.getStatus());
+ << causedBy(redact(selectedNodeStatus.getStatus()));
return nullptr;
}
@@ -773,7 +773,8 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer
} catch (const DBException& DBExcep) {
StringBuilder errMsgBuilder;
errMsgBuilder << "can't callLazy replica set node "
- << _lastSlaveOkHost.toString() << ": " << causedBy(DBExcep);
+ << _lastSlaveOkHost.toString() << ": "
+ << causedBy(redact(DBExcep));
lastNodeErrMsg = errMsgBuilder.str();
LOG(1) << lastNodeErrMsg << endl;
@@ -817,8 +818,8 @@ bool DBClientReplicaSet::recv(Message& m) {
try {
return _lazyState._lastClient->recv(m);
} catch (DBException& e) {
- log() << "could not receive data from " << _lazyState._lastClient->toString() << causedBy(e)
- << endl;
+ log() << "could not receive data from " << _lazyState._lastClient->toString()
+ << causedBy(redact(e));
return false;
}
}
@@ -868,7 +869,7 @@ void DBClientReplicaSet::checkResponse(const char* data,
} else if (_lazyState._lastClient == _master.get()) {
isntMaster();
} else {
- warning() << "passed " << dataObj << " but last rs client "
+ warning() << "passed " << redact(dataObj) << " but last rs client "
<< _lazyState._lastClient->toString() << " is not master or secondary"
<< endl;
}
@@ -995,7 +996,7 @@ bool DBClientReplicaSet::call(Message& toSend,
return conn->call(toSend, response, assertOk, nullptr);
} catch (const DBException& dbExcep) {
LOG(1) << "can't call replica set node " << _lastSlaveOkHost << ": "
- << causedBy(dbExcep) << endl;
+ << causedBy(redact(dbExcep));
if (actualServer)
*actualServer = "";
diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp
index 550ebd8f409..6104ba667db 100644
--- a/src/mongo/client/fetcher.cpp
+++ b/src/mongo/client/fetcher.cpp
@@ -380,19 +380,20 @@ void Fetcher::_sendKillCursors(const CursorId id, const NamespaceString& nss) {
if (id) {
auto logKillCursorsResult = [](const RemoteCommandCallbackArgs& args) {
if (!args.response.isOK()) {
- warning() << "killCursors command task failed: " << args.response.status;
+ warning() << "killCursors command task failed: " << redact(args.response.status);
return;
}
auto status = getStatusFromCommandResult(args.response.data);
if (!status.isOK()) {
- warning() << "killCursors command failed: " << status;
+ warning() << "killCursors command failed: " << redact(status);
}
};
auto cmdObj = BSON("killCursors" << nss.coll() << "cursors" << BSON_ARRAY(id));
auto scheduleResult = _executor->scheduleRemoteCommand(
RemoteCommandRequest(_source, _dbname, cmdObj, nullptr), logKillCursorsResult);
if (!scheduleResult.isOK()) {
- warning() << "failed to schedule killCursors command: " << scheduleResult.getStatus();
+ warning() << "failed to schedule killCursors command: "
+ << redact(scheduleResult.getStatus());
}
}
}
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 3fca978c745..986d642ecdc 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -645,7 +645,6 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
mdata.finished = true;
}
-
LOG(pc) << "initialized " << (isCommand() ? "command " : "query ")
<< (lazyInit ? "(lazily) " : "(full) ") << "on shard " << shardId
<< ", current connection state is " << mdata.toBSON();
@@ -665,7 +664,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
int logLevel = fullReload ? 0 : 1;
LOG(pc + logLevel) << "stale config of ns " << staleNS
<< " during initialization, will retry with forced : " << forceReload
- << ", full : " << fullReload << causedBy(e);
+ << ", full : " << fullReload << causedBy(redact(e));
// This is somewhat strange
if (staleNS != nss)
@@ -679,7 +678,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
return;
} catch (SocketException& e) {
warning() << "socket exception when initializing on " << shardId
- << ", current connection state is " << mdata.toBSON() << causedBy(e);
+ << ", current connection state is " << mdata.toBSON() << causedBy(redact(e));
e._shard = shardId.toString();
mdata.errored = true;
if (returnPartial) {
@@ -689,7 +688,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
throw;
} catch (DBException& e) {
warning() << "db exception when initializing on " << shardId
- << ", current connection state is " << mdata.toBSON() << causedBy(e);
+ << ", current connection state is " << mdata.toBSON() << causedBy(redact(e));
e._shard = shardId.toString();
mdata.errored = true;
if (returnPartial && e.getCode() == 15925 /* From above! */) {
@@ -828,7 +827,7 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
continue;
} catch (SocketException& e) {
warning() << "socket exception when finishing on " << shardId
- << ", current connection state is " << mdata.toBSON() << causedBy(e);
+ << ", current connection state is " << mdata.toBSON() << causedBy(redact(e));
mdata.errored = true;
if (returnPartial) {
mdata.cleanup(true);
@@ -840,7 +839,8 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
// ABOVE
if (e.getCode() == 15988) {
warning() << "exception when receiving data from " << shardId
- << ", current connection state is " << mdata.toBSON() << causedBy(e);
+ << ", current connection state is " << mdata.toBSON()
+ << causedBy(redact(e));
mdata.errored = true;
if (returnPartial) {
@@ -853,10 +853,11 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
// don't print/call "mdata.toBSON()" to avoid unexpected errors e.g. a segfault
if (e.getCode() == 22)
warning() << "bson is malformed :: db exception when finishing on " << shardId
- << causedBy(e);
+ << causedBy(redact(e));
else
warning() << "db exception when finishing on " << shardId
- << ", current connection state is " << mdata.toBSON() << causedBy(e);
+ << ", current connection state is " << mdata.toBSON()
+ << causedBy(redact(e));
mdata.errored = true;
throw;
}
@@ -891,7 +892,7 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
LOG(pc + logLevel)
<< "stale config of ns " << staleNS
<< " on finishing query, will retry with forced : " << forceReload
- << ", full : " << fullReload << causedBy(exception);
+ << ", full : " << fullReload << causedBy(redact(exception));
// This is somewhat strange
if (staleNS != ns)
@@ -1062,7 +1063,8 @@ void ParallelSortClusteredCursor::_oldInit() {
}
LOG(5) << "ParallelSortClusteredCursor::init server:" << serverHost << " ns:" << _ns
- << " query:" << _query << " fields:" << _fields << " options: " << _options;
+ << " query:" << redact(_query) << " fields:" << redact(_fields)
+ << " options: " << _options;
if (!_cursors[i].get())
_cursors[i].reset(
@@ -1214,7 +1216,7 @@ void ParallelSortClusteredCursor::_oldInit() {
} else if (throwException) {
throw DBException(errMsg.str(), 14827);
} else {
- warning() << errMsg.str();
+ warning() << redact(errMsg.str());
}
}
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index d23ad5f48fa..7ad237e9987 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -195,7 +195,7 @@ void ReplicaSetMonitor::init() {
if (!status.isOK()) {
severe() << "Can't start refresh for replica set " << getName()
- << causedBy(status.getStatus());
+ << causedBy(redact(status.getStatus()));
fassertFailed(40139);
}
@@ -246,7 +246,7 @@ void ReplicaSetMonitor::_refresh(const CallbackArgs& cbArgs) {
if (!status.isOK()) {
severe() << "Can't continue refresh for replica set " << getName() << " due to "
- << status.getStatus().toString();
+ << redact(status.getStatus());
fassertFailed(40140);
}
@@ -677,7 +677,7 @@ bool Refresher::receivedIsMasterFromMaster(const IsMasterReply& reply) {
!std::equal(
_set->nodes.begin(), _set->nodes.end(), reply.normalHosts.begin(), hostsEqual)) {
LOG(2) << "Adjusting nodes in our view of replica set " << _set->name
- << " based on master reply: " << reply.raw;
+ << " based on master reply: " << redact(reply.raw);
// remove non-members from _set->nodes
_set->nodes.erase(
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index 8f404e7ed8c..df560952d68 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -285,7 +285,7 @@ Status AuthorizationManager::getAuthorizationVersion(OperationContext* txn, int*
guard.endFetchPhase();
if (!status.isOK()) {
warning() << "Problem fetching the stored schema version of authorization data: "
- << status;
+ << redact(status);
*version = schemaVersionInvalid;
return status;
}
@@ -722,7 +722,7 @@ void AuthorizationManager::_invalidateRelevantCacheData(const char* op,
if (!userName.isOK()) {
warning() << "Invalidating user cache based on user being updated failed, will "
"invalidate the entire cache instead: "
- << userName.getStatus() << endl;
+ << userName.getStatus();
invalidateUserCache();
return;
}
diff --git a/src/mongo/db/auth/authorization_session.cpp b/src/mongo/db/auth/authorization_session.cpp
index 271561cc06a..0a801445c31 100644
--- a/src/mongo/db/auth/authorization_session.cpp
+++ b/src/mongo/db/auth/authorization_session.cpp
@@ -711,7 +711,8 @@ void AuthorizationSession::_refreshUserInfoAsNeeded(OperationContext* txn) {
// Unrecognized error; assume that it's transient, and continue working with the
// out-of-date privilege data.
warning() << "Could not fetch updated user privilege information for " << name
- << "; continuing to use old information. Reason is " << status;
+ << "; continuing to use old information. Reason is "
+ << redact(status);
break;
}
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index df38132a76c..a55c734b353 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -50,11 +50,11 @@ Status AuthzManagerExternalStateLocal::initialize(OperationContext* txn) {
if (status == ErrorCodes::GraphContainsCycle) {
error() << "Cycle detected in admin.system.roles; role inheritance disabled. "
"Remove the listed cycle and any others to re-enable role inheritance. "
- << status.reason();
+ << redact(status);
} else {
error() << "Could not generate role graph from admin.system.roles; "
"only system roles available: "
- << status;
+ << redact(status);
}
}
@@ -392,7 +392,7 @@ void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
if (!status.isOK()) {
warning() << "Skipping invalid admin.system.roles document while calculating privileges"
" for user-defined roles: "
- << status << "; document " << doc;
+ << redact(status) << "; document " << redact(doc);
}
}
@@ -421,7 +421,7 @@ Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* tx
if (status == ErrorCodes::GraphContainsCycle) {
error() << "Inconsistent role graph during authorization manager initialization. Only "
"direct privileges available. "
- << status.reason();
+ << redact(status);
newState = roleGraphStateHasCycle;
status = Status::OK();
} else if (status.isOK()) {
@@ -470,17 +470,17 @@ public:
oplogEntryBuilder << "o2" << _o2;
error() << "Unsupported modification to roles collection in oplog; "
"restart this process to reenable user-defined roles; "
- << status.reason() << "; Oplog entry: " << oplogEntryBuilder.done();
+ << redact(status) << "; Oplog entry: " << redact(oplogEntryBuilder.done());
} else if (!status.isOK()) {
- warning() << "Skipping bad update to roles collection in oplog. " << status
- << " Oplog entry: " << _op;
+ warning() << "Skipping bad update to roles collection in oplog. " << redact(status)
+ << " Oplog entry: " << redact(_op);
}
status = _externalState->_roleGraph.recomputePrivilegeData();
if (status == ErrorCodes::GraphContainsCycle) {
_externalState->_roleGraphState = _externalState->roleGraphStateHasCycle;
error() << "Inconsistent role graph during authorization manager initialization. "
"Only direct privileges available. "
- << status.reason() << " after applying oplog entry " << _op;
+ << redact(status) << " after applying oplog entry " << redact(_op);
} else {
fassert(17183, status);
_externalState->_roleGraphState = _externalState->roleGraphStateConsistent;
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index ccf26f11643..c4edf1f9a0f 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -181,8 +181,7 @@ Status doSaslStep(const Client* client,
if (!status.isOK()) {
log() << session->getMechanism() << " authentication failed for "
<< session->getPrincipalId() << " on " << session->getAuthenticationDatabase()
- << " from client " << client->getRemote().toString() << " ; " << status.toString()
- << std::endl;
+ << " from client " << client->getRemote().toString() << " ; " << redact(status);
sleepmillis(saslGlobalParams.authFailedDelay);
// All the client needs to know is that authentication has failed.
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index 29283c7ed91..f941e1d87b1 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -163,12 +163,11 @@ void UserCacheInvalidator::run() {
if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
warning() << "_getUserCacheGeneration command not found on config server(s), "
"this most likely means you are running an outdated version of mongod "
- "on the config servers"
- << std::endl;
+ "on the config servers";
} else {
warning() << "An error occurred while fetching current user cache generation "
"to check if user cache needs invalidation: "
- << currentGeneration.getStatus() << std::endl;
+ << currentGeneration.getStatus();
}
// When in doubt, invalidate the cache
_authzManager->invalidateUserCache();
@@ -177,7 +176,7 @@ void UserCacheInvalidator::run() {
if (currentGeneration.getValue() != _previousCacheGeneration) {
log() << "User cache generation changed from " << _previousCacheGeneration << " to "
- << currentGeneration.getValue() << "; invalidating user cache" << std::endl;
+ << currentGeneration.getValue() << "; invalidating user cache";
_authzManager->invalidateUserCache();
_previousCacheGeneration = currentGeneration.getValue();
}
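
The hunks in this file also show the commit's second cleanup alongside redaction: trailing endl / std::endl are dropped from log statements, evidently because the log stream builder appends the terminating newline itself when the message is flushed. A before/after sketch of that purely cosmetic change:

    // before: explicit std::endl, redundant with the log framework's own newline
    warning() << "..." << currentGeneration.getStatus() << std::endl;
    // after: the message is terminated automatically on flush
    warning() << "..." << currentGeneration.getStatus();
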
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 2e4c2154385..8d82d6ff395 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -303,7 +303,7 @@ Status Collection::checkValidation(OperationContext* txn, const BSONObj& documen
if (_validationAction == WARN) {
warning() << "Document would fail validation"
- << " collection: " << ns() << " doc: " << document;
+ << " collection: " << ns() << " doc: " << redact(document);
return Status::OK();
}
@@ -539,7 +539,7 @@ Status Collection::aboutToDeleteCapped(OperationContext* txn,
void Collection::deleteDocument(
OperationContext* txn, const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn) {
if (isCapped()) {
- log() << "failing remove on a capped ns " << _ns << endl;
+ log() << "failing remove on a capped ns " << _ns;
uasserted(10089, "cannot remove from a capped collection");
return;
}
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index e61e8606b84..afba5e94987 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -168,7 +168,7 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
WriteUnitOfWork wunit(txn);
- log() << "compact dropping indexes" << endl;
+ log() << "compact dropping indexes";
Status status = _indexCatalog.dropAllIndexes(txn, true);
if (!status.isOK()) {
return StatusWith<CompactStats>(status);
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index c9482569f73..b8fce8830fe 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -145,7 +145,7 @@ void Database::close(OperationContext* txn) {
repl::oplogCheckCloseDatabase(txn, this);
if (BackgroundOperation::inProgForDb(_name)) {
- log() << "warning: bg op in prog during close db? " << _name << endl;
+ log() << "warning: bg op in prog during close db? " << _name;
}
}
@@ -208,7 +208,7 @@ Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry*
_views(&_durableViews) {
Status status = validateDBName(_name);
if (!status.isOK()) {
- warning() << "tried to open invalid db: " << _name << endl;
+ warning() << "tried to open invalid db: " << _name;
uasserted(10028, status.toString());
}
@@ -227,7 +227,7 @@ Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry*
_views.invalidate();
Status reloadStatus = _views.reloadIfNeeded(txn);
if (!reloadStatus.isOK()) {
- warning() << "Unable to parse views: " << reloadStatus
+ warning() << "Unable to parse views: " << redact(reloadStatus)
<< "; remove any invalid views from the " << _viewsName
<< " collection to restore server functionality." << startupWarningsLog;
}
@@ -280,7 +280,7 @@ void Database::clearTmpCollections(OperationContext* txn) {
WriteUnitOfWork wunit(txn);
Status status = dropCollection(txn, ns);
if (!status.isOK()) {
- warning() << "could not drop temp collection '" << ns << "': " << status;
+ warning() << "could not drop temp collection '" << ns << "': " << redact(status);
continue;
}
@@ -369,7 +369,7 @@ Status Database::dropView(OperationContext* txn, StringData fullns) {
Status Database::dropCollection(OperationContext* txn, StringData fullns) {
invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- LOG(1) << "dropCollection: " << fullns << endl;
+ LOG(1) << "dropCollection: " << fullns;
massertNamespaceNotIndex(fullns, "dropCollection");
Collection* collection = getCollection(fullns);
@@ -408,12 +408,12 @@ Status Database::dropCollection(OperationContext* txn, StringData fullns) {
Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
if (!s.isOK()) {
warning() << "could not drop collection, trying to drop indexes" << fullns << " because of "
- << s.toString();
+ << redact(s.toString());
return s;
}
verify(collection->_details->getTotalIndexCount(txn) == 0);
- LOG(1) << "\t dropIndexes done" << endl;
+ LOG(1) << "\t dropIndexes done";
Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns);
@@ -607,7 +607,7 @@ void dropAllDatabasesExceptLocal(OperationContext* txn) {
if (n.size() == 0)
return;
- log() << "dropAllDatabasesExceptLocal " << n.size() << endl;
+ log() << "dropAllDatabasesExceptLocal " << n.size();
repl::getGlobalReplicationCoordinator()->dropAllSnapshots();
for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 702bc51ec73..29b95228a2d 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -138,7 +138,7 @@ IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* txn,
Status status = _isSpecOk(txn, descriptor->infoObj());
if (!status.isOK() && status != ErrorCodes::IndexAlreadyExists) {
severe() << "Found an invalid index " << descriptor->infoObj() << " on the "
- << _collection->ns().ns() << " collection: " << status.reason();
+ << _collection->ns().ns() << " collection: " << redact(status);
fassertFailedNoTrace(28782);
}
@@ -209,14 +209,14 @@ bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& k
// RulesFor22
if (!known) {
- log() << "warning: can't find plugin [" << pluginName << "]" << endl;
+ log() << "warning: can't find plugin [" << pluginName << "]";
return true;
}
if (!IndexNames::existedBefore24(pluginName)) {
warning() << "Treating index " << keyPattern << " as ascending since "
<< "it was created before 2.4 and '" << pluginName << "' "
- << "was not a valid type at that time." << endl;
+ << "was not a valid type at that time.";
return true;
}
@@ -712,7 +712,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
findIndexByKeyPatternAndCollationSpec(txn, key, collation, findInProgressIndexes);
if (desc) {
LOG(2) << "index already exists with diff name " << name << " pattern: " << key
- << " collation: " << collation << endl;
+ << " collation: " << collation;
IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
@@ -931,7 +931,7 @@ void IndexCatalog::_deleteIndexFromDisk(OperationContext* txn,
// this is ok, as we may be partially through index creation
} else if (!status.isOK()) {
warning() << "couldn't drop index " << indexName << " on collection: " << _collection->ns()
- << " because of " << status.toString();
+ << " because of " << redact(status);
}
}
@@ -1266,8 +1266,8 @@ Status IndexCatalog::_unindexRecord(OperationContext* txn,
Status status = index->accessMethod()->remove(txn, obj, loc, options, &removed);
if (!status.isOK()) {
- log() << "Couldn't unindex record " << obj.toString() << " from collection "
- << _collection->ns() << ". Status: " << status.toString();
+ log() << "Couldn't unindex record " << redact(obj) << " from collection "
+ << _collection->ns() << ". Status: " << redact(status);
}
if (keysDeletedOut) {
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 83a92abd9c7..c67b29f8559 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -116,7 +116,7 @@ IndexCatalogEntry::IndexCatalogEntry(OperationContext* txn,
invariantOK(statusWithMatcher.getStatus());
_filterExpression = std::move(statusWithMatcher.getValue());
LOG(2) << "have filter expression for " << _ns << " " << _descriptor->indexName() << " "
- << filter;
+ << redact(filter);
}
}
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index dcde6aee103..7f27589085d 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -125,7 +125,7 @@ MultiIndexBlock::~MultiIndexBlock() {
} catch (const DBException& e) {
if (e.toStatus() == ErrorCodes::ExceededMemoryLimit)
continue;
- error() << "Caught exception while cleaning up partially built indexes: " << e.what();
+ error() << "Caught exception while cleaning up partially built indexes: " << redact(e);
} catch (const std::exception& e) {
error() << "Caught exception while cleaning up partially built indexes: " << e.what();
} catch (...) {
@@ -337,8 +337,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
if (!ret.isOK())
return ret;
- log() << "build index done. scanned " << n << " total records. " << t.seconds() << " secs"
- << endl;
+ log() << "build index done. scanned " << n << " total records. " << t.seconds() << " secs";
return Status::OK();
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index ec78619b22e..249b807ab53 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -163,7 +163,7 @@ struct Cloner::Fun {
if (now - lastLog >= 60) {
// report progress
if (lastLog)
- log() << "clone " << to_collection << ' ' << numSeen << endl;
+ log() << "clone " << to_collection << ' ' << numSeen;
lastLog = now;
}
txn->checkForInterrupt();
@@ -224,7 +224,7 @@ struct Cloner::Fun {
if (!status.isOK()) {
str::stream ss;
ss << "Cloner: found corrupt document in " << from_collection.toString() << ": "
- << status.reason();
+ << redact(status);
if (skipCorruptDocumentsWhenCloning) {
warning() << ss.ss.str() << "; skipping";
continue;
@@ -244,7 +244,7 @@ struct Cloner::Fun {
Status status = collection->insertDocument(txn, doc, nullOpDebug, true);
if (!status.isOK()) {
error() << "error: exception cloning object in " << from_collection << ' '
- << status << " obj:" << doc;
+ << redact(status) << " obj:" << redact(doc);
}
uassertStatusOK(status);
wunit.commit();
@@ -280,7 +280,7 @@ void Cloner::copy(OperationContext* txn,
const CloneOptions& opts,
Query query) {
LOG(2) << "\t\tcloning collection " << from_collection << " to " << to_collection << " on "
- << _conn->getServerAddress() << " with filter " << query.toString() << endl;
+ << _conn->getServerAddress() << " with filter " << redact(query.toString());
Fun f(txn, toDBName);
f.numSeen = 0;
@@ -461,7 +461,7 @@ bool Cloner::copyCollection(OperationContext* txn,
/* TODO : copyIndexes bool does not seem to be implemented! */
if (!shouldCopyIndexes) {
- log() << "ERROR copy collection shouldCopyIndexes not implemented? " << ns << endl;
+ log() << "ERROR copy collection shouldCopyIndexes not implemented? " << ns;
}
// indexes
@@ -494,7 +494,7 @@ StatusWith<std::vector<BSONObj>> Cloner::filterCollectionsForClone(
if (ns.isSystem()) {
if (legalClientSystemNS(ns.ns()) == 0) {
- LOG(2) << "\t\t not cloning because system collection" << endl;
+ LOG(2) << "\t\t not cloning because system collection";
continue;
}
}
@@ -631,7 +631,7 @@ Status Cloner::copyDb(OperationContext* txn,
}
}
for (auto&& collection : toClone) {
- LOG(2) << " really will clone: " << collection << endl;
+ LOG(2) << " really will clone: " << collection;
const char* collectionName = collection["name"].valuestr();
BSONObj options = collection.getObjectField("options");
@@ -643,7 +643,7 @@ Status Cloner::copyDb(OperationContext* txn,
clonedColls->insert(from_name.ns());
}
- LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
+ LOG(1) << "\t\t cloning " << from_name << " -> " << to_name;
Query q;
if (opts.snapshot)
q.snapshot();
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
index 3589cc309f8..945b1239abe 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
@@ -299,29 +299,22 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
const BSONObj& data = customTimeout.getData();
lockExpiration = Milliseconds(data["timeoutMs"].numberInt());
}
- // REDACT: can this whyMessage ever have PII?
LOG(1) << "trying to acquire new distributed lock for " << name
<< " ( lock timeout : " << durationCount<Milliseconds>(lockExpiration)
<< " ms, ping interval : " << durationCount<Milliseconds>(_pingInterval)
<< " ms, process : " << _processID << " )"
- << " with lockSessionID: " << lockSessionID
- << ", why: " << redact(whyMessage.toString());
+ << " with lockSessionID: " << lockSessionID << ", why: " << whyMessage.toString();
- auto lockResult = _catalog->grabLock(txn,
- name,
- lockSessionID,
- who,
- _processID,
- Date_t::now(),
- redact(whyMessage.toString()));
+ auto lockResult = _catalog->grabLock(
+ txn, name, lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
auto status = lockResult.getStatus();
if (status.isOK()) {
// Lock is acquired since findAndModify was able to successfully modify
// the lock document.
- log() << "distributed lock '" << name << "' acquired for '"
- << redact(whyMessage.toString()) << "', ts : " << lockSessionID;
+ log() << "distributed lock '" << name << "' acquired for '" << whyMessage.toString()
+ << "', ts : " << lockSessionID;
return lockSessionID;
}
@@ -412,7 +405,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
// Periodically message for debugging reasons
if (msgTimer.seconds() > 10) {
LOG(0) << "waited " << timer.seconds() << "s for distributed lock " << name << " for "
- << redact(whyMessage.toString());
+ << whyMessage.toString();
msgTimer.reset();
}
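
Note that this last file moves in the opposite direction from the rest of the commit: the pre-existing redact(whyMessage.toString()) calls and the "// REDACT: can this whyMessage ever have PII?" question are removed, evidently resolving that question in the negative: whyMessage is server-generated lock-acquisition text rather than user data, so it can be logged and persisted in the clear.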