author     Ramon Fernandez <ramon@mongodb.com>    2016-08-27 15:18:19 -0400
committer  Ramon Fernandez <ramon@mongodb.com>    2016-08-27 16:19:54 -0400
commit     c5f891c37a7fabc2455f2f0230ec9bcd76b62096 (patch)
tree       520f3969fe6983c8a722fa7c78fe897093922a17 /src/mongo/db
parent     76a32a783ca4328e9b01555fb05d02ff635d8c19 (diff)
download   mongo-c5f891c37a7fabc2455f2f0230ec9bcd76b62096.tar.gz
SERVER-24991 log redaction for storage mongos
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/run_commands.cpp                                     |  3
-rw-r--r--  src/mongo/db/s/check_sharding_index_command.cpp                   |  4
-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp                           | 15
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp                            |  4
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp                           | 47
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp           |  6
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp                  | 37
-rw-r--r--  src/mongo/db/s/migration_destination_manager_legacy_commands.cpp  |  7
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp                       |  4
-rw-r--r--  src/mongo/db/s/move_chunk_command.cpp                             | 10
-rw-r--r--  src/mongo/db/s/move_timing_helper.cpp                             |  3
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp                      |  2
-rw-r--r--  src/mongo/db/s/sharding_state.cpp                                 |  5
-rw-r--r--  src/mongo/db/s/sharding_state_recovery.cpp                        |  8
-rw-r--r--  src/mongo/db/s/split_chunk_command.cpp                            | 32
-rw-r--r--  src/mongo/db/s/split_vector_command.cpp                           | 12
-rw-r--r--  src/mongo/db/stats/snapshots.cpp                                  |  2
17 files changed, 106 insertions, 95 deletions
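The change applies one pattern throughout the diff below: any value that may contain user data (BSON documents, shard key bounds, command objects, Status reasons, exception messages) is wrapped in redact() before being streamed into a log or error message, while structural metadata such as namespaces, versions, and counts stays in the clear. The following standalone sketch only illustrates the idea; the logRedactionEnabled flag, kRedactedText placeholder, and this redact() overload are hypothetical stand-ins, not the server's actual logging utilities.

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for a server-wide "redact client data" setting.
    static bool logRedactionEnabled = true;

    // Placeholder emitted instead of potentially sensitive values.
    static const std::string kRedactedText = "###";

    // Minimal redact() sketch: pass the value through when redaction is off,
    // otherwise substitute a fixed placeholder so field values never reach the log.
    std::string redact(const std::string& userData) {
        return logRedactionEnabled ? kRedactedText : userData;
    }

    int main() {
        const std::string startingFromKey = "{ shardKey: 42 }";  // pretend shard key bound

        // Metadata (namespace, counts) stays in the clear; user data goes through redact().
        std::cout << "cleanupOrphaned requested for test.coll starting from "
                  << redact(startingFromKey) << ", removing next orphan range" << std::endl;
        return 0;
    }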
diff --git a/src/mongo/db/run_commands.cpp b/src/mongo/db/run_commands.cpp
index 108ff103696..41adbdfb507 100644
--- a/src/mongo/db/run_commands.cpp
+++ b/src/mongo/db/run_commands.cpp
@@ -57,7 +57,8 @@ void runCommands(OperationContext* txn,
               << "'";
         LOG(2) << msg;
         uasserted(ErrorCodes::CommandNotFound,
-                  str::stream() << msg << ", bad cmd: '" << request.getCommandArgs() << "'");
+                  str::stream() << msg << ", bad cmd: '" << redact(request.getCommandArgs())
+                                << "'");
     }
 
     LOG(2) << "run command " << request.getDatabase() << ".$cmd" << ' '
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 379e15db6f6..b55c2ac6a4d 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -187,8 +187,8 @@ public:
                     continue;
 
                 const string msg = str::stream()
-                    << "found missing value in key " << currKey << " for doc: "
-                    << (obj.hasField("_id") ? obj.toString() : obj["_id"].toString());
+                    << "found missing value in key " << redact(currKey)
+                    << " for doc: " << (obj.hasField("_id") ? redact(obj) : redact(obj["_id"]));
                 log() << "checkShardingIndex for '" << nss.toString() << "' failed: " << msg;
                 errmsg = msg;
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 37b90be8211..281d1cdcc39 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -96,8 +96,9 @@ CleanupResult cleanupOrphanedData(OperationContext* txn,
     BSONObj keyPattern = metadata->getKeyPattern();
     if (!startingFromKey.isEmpty()) {
         if (!metadata->isValidKey(startingFromKey)) {
-            *errMsg = stream() << "could not cleanup orphaned data, start key " << startingFromKey
-                               << " does not match shard key pattern " << keyPattern;
+            *errMsg = stream() << "could not cleanup orphaned data, start key "
+                               << redact(startingFromKey) << " does not match shard key pattern "
+                               << keyPattern;
 
             warning() << *errMsg;
             return CleanupResult_Error;
@@ -109,7 +110,7 @@ CleanupResult cleanupOrphanedData(OperationContext* txn,
     KeyRange orphanRange;
     if (!metadata->getNextOrphanRange(startingFromKey, &orphanRange)) {
         LOG(1) << "cleanupOrphaned requested for " << ns.toString() << " starting from "
-               << startingFromKey << ", no orphan ranges remain";
+               << redact(startingFromKey) << ", no orphan ranges remain";
 
         return CleanupResult_Done;
     }
@@ -117,8 +118,8 @@ CleanupResult cleanupOrphanedData(OperationContext* txn,
     *stoppedAtKey = orphanRange.maxKey;
 
     LOG(0) << "cleanupOrphaned requested for " << ns.toString() << " starting from "
-           << startingFromKey << ", removing next orphan range"
-           << " [" << orphanRange.minKey << "," << orphanRange.maxKey << ")";
+           << redact(startingFromKey) << ", removing next orphan range"
+           << " [" << redact(orphanRange.minKey) << "," << redact(orphanRange.maxKey) << ")";
 
     // Metadata snapshot may be stale now, but deleter checks metadata again in write lock
     // before delete.
@@ -132,7 +133,7 @@ CleanupResult cleanupOrphanedData(OperationContext* txn,
     deleterOptions.removeSaverReason = "cleanup-cmd";
 
     if (!getDeleter()->deleteNow(txn, deleterOptions, errMsg)) {
-        warning() << *errMsg;
+        warning() << redact(*errMsg);
         return CleanupResult_Error;
     }
 
@@ -248,7 +249,7 @@ public:
                 warning() << "Shard version in transition detected while refreshing "
                           << "metadata for " << ns << " at version " << shardVersion;
             } else {
-                errmsg = str::stream() << "failed to refresh shard metadata: " << status.reason();
+                errmsg = str::stream() << "failed to refresh shard metadata: " << redact(status);
                 return false;
             }
         }
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 4a644cfbbdb..a8ca9a8255b 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -148,8 +148,8 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
         RangeVector pendingOverlap;
         getRangeMapOverlap(_pendingMap, chunk.getMin(), chunk.getMax(), &pendingOverlap);
 
-        warning() << "new pending chunk " << rangeToString(chunk.getMin(), chunk.getMax())
-                  << " overlaps existing pending chunks " << overlapToString(pendingOverlap)
+        warning() << "new pending chunk " << redact(rangeToString(chunk.getMin(), chunk.getMax()))
+                  << " overlaps existing pending chunks " << redact(overlapToString(pendingOverlap))
                   << ", a migration may not have completed";
 
         for (RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end(); ++it) {
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index ac169aadda5..39e29d7bd9c 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -169,7 +169,9 @@ bool mergeChunks(OperationContext* txn,
     if (!scopedDistLock.isOK()) {
         *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
-                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
+                           << " to merge chunks in [" << redact(minKey) << "," << redact(maxKey)
+                           << ")"
+                           // REDACT??? Not sure how to handle the causedBy's
                            << causedBy(scopedDistLock.getStatus());
 
         warning() << *errMsg;
@@ -187,7 +189,7 @@ bool mergeChunks(OperationContext* txn,
 
     if (!status.isOK()) {
         *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
-                                << nss.ns() << causedBy(status.reason());
+                                << nss.ns() << redact(status);
 
         warning() << *errMsg;
         return false;
@@ -220,10 +222,10 @@ bool mergeChunks(OperationContext* txn,
     dassert(metadata->getShardVersion().equals(shardVersion));
 
     if (!metadata->isValidKey(minKey) || !metadata->isValidKey(maxKey)) {
-        *errMsg = stream() << "could not merge chunks, the range " << rangeToString(minKey, maxKey)
-                           << " is not valid"
+        *errMsg = stream() << "could not merge chunks, the range "
+                           << redact(rangeToString(minKey, maxKey)) << " is not valid"
                            << " for collection " << nss.ns() << " with key pattern "
-                           << metadata->getKeyPattern();
+                           << metadata->getKeyPattern().toString();
 
         warning() << *errMsg;
         return false;
@@ -250,8 +252,8 @@ bool mergeChunks(OperationContext* txn,
 
     if (chunksToMerge.empty()) {
         *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
-                           << " range starting at " << minKey << " and ending at " << maxKey
-                           << " does not belong to shard " << gss->getShardName();
+                           << " range starting at " << redact(minKey) << " and ending at "
+                           << redact(maxKey) << " does not belong to shard " << gss->getShardName();
 
         warning() << *errMsg;
         return false;
@@ -268,8 +270,8 @@ bool mergeChunks(OperationContext* txn,
 
     if (!minKeyInRange) {
         *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
-                           << " range starting at " << minKey << " does not belong to shard "
-                           << gss->getShardName();
+                           << " range starting at " << redact(minKey)
+                           << " does not belong to shard " << gss->getShardName();
 
         warning() << *errMsg;
         return false;
@@ -282,7 +284,7 @@ bool mergeChunks(OperationContext* txn,
 
     if (!maxKeyInRange) {
         *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
-                           << " range ending at " << maxKey << " does not belong to shard "
+                           << " range ending at " << redact(maxKey) << " does not belong to shard "
                            << gss->getShardName();
 
         warning() << *errMsg;
@@ -293,11 +295,12 @@ bool mergeChunks(OperationContext* txn,
     bool validRangeEndKey = lastDocMax.woCompare(maxKey) == 0;
 
     if (!validRangeStartKey || !validRangeEndKey) {
-        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
-                           << " does not contain a chunk "
-                           << (!validRangeStartKey ? "starting at " + minKey.toString() : "")
-                           << (!validRangeStartKey && !validRangeEndKey ? " or " : "")
-                           << (!validRangeEndKey ? "ending at " + maxKey.toString() : "");
+        *errMsg =
+            stream() << "could not merge chunks, collection " << nss.ns()
+                     << " does not contain a chunk "
+                     << (!validRangeStartKey ? "starting at " + redact(minKey.toString()) : "")
+                     << (!validRangeStartKey && !validRangeEndKey ? " or " : "")
+                     << (!validRangeEndKey ? "ending at " + redact(maxKey.toString()) : "");
 
         warning() << *errMsg;
         return false;
@@ -305,7 +308,8 @@ bool mergeChunks(OperationContext* txn,
 
     if (chunksToMerge.size() == 1) {
         *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
-                           << " already contains chunk for " << rangeToString(minKey, maxKey);
+                           << " already contains chunk for "
+                           << redact(rangeToString(minKey, maxKey));
 
         warning() << *errMsg;
         return false;
@@ -314,10 +318,11 @@ bool mergeChunks(OperationContext* txn,
     // Look for hole in range
     for (size_t i = 1; i < chunksToMerge.size(); ++i) {
         if (chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) != 0) {
-            *errMsg =
-                stream() << "could not merge chunks, collection " << nss.ns()
-                         << " has a hole in the range " << rangeToString(minKey, maxKey) << " at "
-                         << rangeToString(chunksToMerge[i - 1].getMax(), chunksToMerge[i].getMin());
+            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
+                               << " has a hole in the range "
+                               << redact(rangeToString(minKey, maxKey)) << " at "
+                               << redact(rangeToString(chunksToMerge[i - 1].getMax(),
+                                                       chunksToMerge[i].getMin()));
 
             warning() << *errMsg;
             return false;
@@ -335,7 +340,7 @@ bool mergeChunks(OperationContext* txn,
     Status applyOpsStatus = runApplyOpsCmd(txn, chunksToMerge, shardVersion, mergeVersion);
     if (!applyOpsStatus.isOK()) {
-        warning() << applyOpsStatus;
+        warning() << redact(applyOpsStatus);
         return false;
     }
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index a4ed3c77a7c..2be1bcbab01 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -353,7 +353,7 @@ void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* txn,
     BSONElement idElement = insertedDoc["_id"];
     if (idElement.eoo()) {
         warning() << "logInsertOp got a document with no _id field, ignoring inserted document: "
-                  << insertedDoc;
+                  << redact(insertedDoc);
         return;
     }
 
@@ -371,7 +371,7 @@ void MigrationChunkClonerSourceLegacy::onUpdateOp(OperationContext* txn,
     BSONElement idElement = updatedDoc["_id"];
     if (idElement.eoo()) {
         warning() << "logUpdateOp got a document with no _id field, ignoring updatedDoc: "
-                  << updatedDoc;
+                  << redact(updatedDoc);
         return;
     }
 
@@ -389,7 +389,7 @@ void MigrationChunkClonerSourceLegacy::onDeleteOp(OperationContext* txn,
     BSONElement idElement = deletedDocId["_id"];
     if (idElement.eoo()) {
         warning() << "logDeleteOp got a document with no _id field, ignoring deleted doc: "
-                  << deletedDocId;
+                  << redact(deletedDocId);
         return;
     }
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 87b45e9df03..116b722bd87 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -411,7 +411,7 @@ void MigrationDestinationManager::_migrateThread(std::string ns,
             _errmsg = e.what();
         }
 
-        error() << "migrate failed: " << e.what() << migrateLog;
+        error() << "migrate failed: " << redact(e.what()) << migrateLog;
     } catch (...) {
         {
             stdx::lock_guard<stdx::mutex> sl(_mutex);
@@ -426,7 +426,7 @@ void MigrationDestinationManager::_migrateThread(std::string ns,
         // Unprotect the range if needed/possible on unsuccessful TO migration
         Status status = _forgetPending(opCtx.get(), NamespaceString(ns), min, max, epoch);
         if (!status.isOK()) {
-            warning() << "Failed to remove pending range" << causedBy(status);
+            warning() << "Failed to remove pending range" << redact(causedBy(status));
         }
     }
 
@@ -448,7 +448,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
     invariant(!min.isEmpty());
     invariant(!max.isEmpty());
 
-    log() << "starting receiving-end of migration of chunk " << min << " -> " << max
+    log() << "starting receiving-end of migration of chunk " << redact(min) << " -> " << redact(max)
           << " for collection " << ns << " from " << fromShardConnString << " at epoch "
          << epoch.toString();
 
@@ -505,7 +505,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
             Status status = userCreateNS(txn, db, ns, options, false);
             if (!status.isOK()) {
                 warning() << "failed to create collection [" << ns << "] "
-                          << " with options " << options << ": " << status;
+                          << " with options " << options << ": " << redact(status);
             }
             wuow.commit();
         }
@@ -574,7 +574,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
         Status status = indexer.init(indexSpecsWithCollation);
         if (!status.isOK()) {
             errmsg = str::stream() << "failed to create index before migrating data. "
-                                   << " error: " << status.toString();
+                                   << " error: " << redact(status);
             warning() << errmsg;
             setState(FAIL);
             return;
@@ -583,7 +583,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
         status = indexer.insertAllDocumentsInCollection();
         if (!status.isOK()) {
             errmsg = str::stream() << "failed to create index before migrating data. "
-                                   << " error: " << status.toString();
+                                   << " error: " << redact(status);
             warning() << errmsg;
             setState(FAIL);
             return;
@@ -624,7 +624,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
             deleterOptions.removeSaverReason = "preCleanup";
 
             if (!getDeleter()->deleteNow(txn, deleterOptions, &errmsg)) {
-                warning() << "Failed to queue delete for migrate abort: " << errmsg;
+                warning() << "Failed to queue delete for migrate abort: " << redact(errmsg);
                 setState(FAIL);
                 return;
             }
@@ -653,7 +653,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
             deleterOptions.onlyRemoveOrphanedDocs = true;
 
             if (!getDeleter()->queueDelete(txn, deleterOptions, NULL /* notifier */, &errMsg)) {
-                warning() << "Failed to queue delete for migrate abort: " << errMsg;
+                warning() << "Failed to queue delete for migrate abort: " << redact(errMsg);
             }
         }
 
@@ -670,7 +670,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
                                   res)) {  // gets array of objects to copy, in disk order
                 setState(FAIL);
                 errmsg = "_migrateClone failed: ";
-                errmsg += res.toString();
+                errmsg += redact(res.toString());
                 error() << errmsg << migrateLog;
                 conn.done();
                 return;
@@ -697,8 +697,9 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
                     if (willOverrideLocalId(
                             txn, ns, min, max, shardKeyPattern, cx.db(), docToClone, &localDoc)) {
                         string errMsg = str::stream() << "cannot migrate chunk, local document "
-                                                      << localDoc << " has same _id as cloned "
-                                                      << "remote document " << docToClone;
+                                                      << redact(localDoc)
+                                                      << " has same _id as cloned "
+                                                      << "remote document " << redact(docToClone);
 
                         warning() << errMsg;
 
@@ -755,8 +756,8 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
             if (!conn->runCommand("admin", xferModsRequest, res)) {
                 setState(FAIL);
                 errmsg = "_transferMods failed: ";
-                errmsg += res.toString();
-                error() << "_transferMods failed: " << res << migrateLog;
+                errmsg += redact(res);
+                error() << "_transferMods failed: " << redact(res) << migrateLog;
                 conn.done();
                 return;
             }
@@ -852,7 +853,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
             BSONObj res;
             if (!conn->runCommand("admin", xferModsRequest, res)) {
-                log() << "_transferMods failed in STEADY state: " << res << migrateLog;
+                log() << "_transferMods failed in STEADY state: " << redact(res) << migrateLog;
                 errmsg = res.toString();
                 setState(FAIL);
                 conn.done();
@@ -1003,8 +1004,8 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* txn,
     if (!opReplicatedEnough(txn, lastOpApplied, writeConcern)) {
         repl::OpTime op(lastOpApplied);
         OCCASIONALLY warning() << "migrate commit waiting for a majority of slaves for '" << ns
-                               << "' " << min << " -> " << max << " waiting for: " << op
-                               << migrateLog;
+                               << "' " << redact(min) << " -> " << redact(max)
+                               << " waiting for: " << op << migrateLog;
         return false;
     }
 
@@ -1018,8 +1019,8 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* txn,
         // if durability is on, force a write to journal
         if (getDur().commitNow(txn)) {
-            log() << "migrate commit flushed to journal for '" << ns << "' " << min << " -> " << max
-                  << migrateLog;
+            log() << "migrate commit flushed to journal for '" << ns << "' " << redact(min)
+                  << " -> " << redact(max) << migrateLog;
         }
     }
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index dca0a9c43bd..77d05403ef4 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -143,7 +143,8 @@ public:
         Status status = shardingState->refreshMetadataNow(txn, ns, &currentVersion);
         if (!status.isOK()) {
             errmsg = str::stream() << "cannot start recv'ing chunk "
-                                   << "[" << min << "," << max << ")" << causedBy(status.reason());
+                                   << "[" << redact(min) << "," << redact(max) << ")"
+                                   << redact(status);
 
             warning() << errmsg;
             return false;
@@ -160,8 +161,8 @@ public:
         auto statusWithFromShardConnectionString = ConnectionString::parse(cmdObj["from"].String());
         if (!statusWithFromShardConnectionString.isOK()) {
             errmsg = str::stream() << "cannot start recv'ing chunk "
-                                   << "[" << min << "," << max << ")"
-                                   << causedBy(statusWithFromShardConnectionString.getStatus());
+                                   << "[" << redact(min) << "," << redact(max) << ")"
+                                   << redact(statusWithFromShardConnectionString.getStatus());
 
             warning() << errmsg;
             return false;
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index f1bb4701e34..f8cf44048fd 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -93,7 +93,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn, MoveChunkR
     const ChunkVersion expectedCollectionVersion = oss.getShardVersion(_args.getNss());
 
     log() << "Starting chunk migration for "
-          << ChunkRange(_args.getMinKey(), _args.getMaxKey()).toString()
+          << redact(ChunkRange(_args.getMinKey(), _args.getMaxKey()).toString())
          << " with expected collection version " << expectedCollectionVersion;
 
     // Now that the collection is locked, snapshot the metadata and fetch the latest versions
@@ -367,7 +367,7 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
         // the original chunks no longer exist.
 
         warning() << "Migration metadata commit may have failed: refreshing metadata to check"
-                  << causedBy(commitChunkMigrationResponse.getStatus());
+                  << redact(commitChunkMigrationResponse.getStatus());
 
         // Need to get the latest optime in case the refresh request goes to a secondary --
         // otherwise the read won't wait for the write that _configsvrCommitChunkMigration may have
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 64b5d2081d4..d255a2d8b98 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -71,8 +71,8 @@ DistLockManager::ScopedDistLock acquireCollectionDistLock(OperationContext* txn,
     if (!distLockStatus.isOK()) {
         const string msg = str::stream()
            << "Could not acquire collection lock for " << args.getNss().ns()
-            << " to migrate chunk [" << args.getMinKey() << "," << args.getMaxKey() << ") due to "
-            << distLockStatus.getStatus().toString();
+            << " to migrate chunk [" << redact(args.getMinKey()) << "," << redact(args.getMaxKey())
+            << ") due to " << distLockStatus.getStatus().toString();
         warning() << msg;
         uasserted(distLockStatus.getStatus().code(), msg);
     }
@@ -86,7 +86,7 @@ DistLockManager::ScopedDistLock acquireCollectionDistLock(OperationContext* txn,
  */
 void uassertStatusOKWithWarning(const Status& status) {
     if (!status.isOK()) {
-        warning() << "Chunk move failed" << causedBy(status);
+        warning() << "Chunk move failed" << redact(status);
         uassertStatusOK(status);
     }
 }
@@ -275,7 +275,7 @@ private:
             // This is an immediate delete, and as a consequence, there could be more
            // deletes happening simultaneously than there are deleter worker threads.
             if (!getDeleter()->deleteNow(txn, deleterOptions, &errMsg)) {
-                log() << "Error occured while performing cleanup: " << errMsg;
+                log() << "Error occured while performing cleanup: " << redact(errMsg);
             }
         } else {
             log() << "forking for cleanup of chunk data";
@@ -285,7 +285,7 @@ private:
                                            deleterOptions,
                                            NULL,  // Don't want to be notified
                                            &errMsg)) {
-                log() << "could not queue migration cleanup: " << errMsg;
+                log() << "could not queue migration cleanup: " << redact(errMsg);
             }
         }
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 79cb8e61fad..222a5383002 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -88,7 +88,8 @@ MoveTimingHelper::~MoveTimingHelper() {
                                                          _b.obj(),
                                                          ShardingCatalogClient::kMajorityWriteConcern);
     } catch (const std::exception& e) {
-        warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what();
+        warning() << "couldn't record timing for moveChunk '" << _where
+                  << "': " << redact(e.what());
     }
 }
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index ff371a0bce3..a8ae64aa18a 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -314,7 +314,7 @@ public:
                 errmsg = str::stream() << "could not refresh metadata for " << ns
                                        << " with requested shard version "
                                        << requestedVersion.toString() << ", stored shard version is "
-                                       << currVersion.toString() << causedBy(status.reason());
+                                       << currVersion.toString() << redact(status);
 
                 warning() << errmsg;
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 16898c65b21..30cfa7f0f18 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -112,7 +112,7 @@ void updateShardIdentityConfigStringCB(const string& setName, const string& newC
                       ->updateShardIdentityConfigString(uniqOpCtx.get(), newConnectionString);
     if (!status.isOK() && !ErrorCodes::isNotMasterError(status.code())) {
         warning() << "error encountered while trying to update config connection string to "
-                  << newConnectionString << causedBy(status);
+                  << newConnectionString << redact(status);
     }
 }
 
@@ -714,8 +714,7 @@ StatusWith<ChunkVersion> ShardingState::_refreshMetadata(
     if (status.code() == ErrorCodes::NamespaceNotFound) {
         remoteMetadata.reset();
     } else if (!status.isOK()) {
-        warning() << "Could not remotely refresh metadata for " << nss.ns()
-                  << causedBy(status.reason());
+        warning() << "Could not remotely refresh metadata for " << nss.ns() << redact(status);
 
         return status;
     }
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 544086cf713..3d6c5b36fd2 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -196,7 +196,7 @@ Status modifyRecoveryDocument(OperationContext* txn,
                                                               grid.configOpTime(),
                                                               change);
 
-        LOG(1) << "Changing sharding recovery document " << updateObj;
+        LOG(1) << "Changing sharding recovery document " << redact(updateObj);
 
         UpdateRequest updateReq(NamespaceString::kConfigCollectionNamespace);
         updateReq.setQuery(RecoveryDocument::getQuery());
@@ -241,7 +241,7 @@ Status ShardingStateRecovery::startMetadataOp(OperationContext* txn) {
 void ShardingStateRecovery::endMetadataOp(OperationContext* txn) {
     Status status = modifyRecoveryDocument(txn, RecoveryDocument::Decrement, WriteConcernOptions());
     if (!status.isOK()) {
-        warning() << "Failed to decrement minOpTimeUpdaters due to " << status;
+        warning() << "Failed to decrement minOpTimeUpdaters due to " << redact(status);
     }
 }
 
@@ -268,7 +268,7 @@ Status ShardingStateRecovery::recover(OperationContext* txn) {
 
     const auto recoveryDoc = std::move(recoveryDocStatus.getValue());
 
-    log() << "Sharding state recovery process found document " << recoveryDoc.toBSON();
+    log() << "Sharding state recovery process found document " << redact(recoveryDoc.toBSON());
 
     // Make sure the sharding state is initialized
     ShardingState* const shardingState = ShardingState::get(txn);
@@ -308,7 +308,7 @@ Status ShardingStateRecovery::recover(OperationContext* txn) {
     // Finally, clear the recovery document so next time we don't need to recover
     status = modifyRecoveryDocument(txn, RecoveryDocument::Clear, kLocalWriteConcern);
     if (!status.isOK()) {
-        warning() << "Failed to reset sharding state recovery document due to " << status;
+        warning() << "Failed to reset sharding state recovery document due to " << redact(status);
     }
 
     return Status::OK();
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 8f40c2fcf94..028adbd29a3 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -118,12 +118,13 @@ bool checkMetadataForSuccess(OperationContext* txn,
     ChunkType nextChunk;
     for (const auto& endKey : newChunkBounds) {
-        log() << "JESS: checking metadataAfterSplit for new chunk boundaries [" << startKey << ","
-              << endKey << ")";
+        log() << "checking metadataAfterSplit for new chunk boundaries [" << redact(startKey) << ","
+              << redact(endKey) << ")";
         // Check that all new chunks fit the new chunk boundaries
         if (!metadataAfterSplit->getNextChunk(startKey, &nextChunk) ||
             nextChunk.getMax().woCompare(endKey)) {
-            log() << "JESS: ERROR, found [" << startKey << "," << nextChunk.getMax() << ")";
+            log() << "ERROR, found [" << redact(startKey) << "," << redact(nextChunk.getMax())
+                  << ")";
             return false;
         }
 
@@ -249,7 +250,7 @@ public:
         // Initialize our current shard name in the shard state if needed
         shardingState->setShardName(shardName);
 
-        log() << "received splitChunk request: " << cmdObj;
+        log() << "received splitChunk request: " << redact(cmdObj);
 
         //
         // Lock the collection's metadata and get highest version for the current shard
@@ -262,8 +263,8 @@ public:
             txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
         if (!scopedDistLock.isOK()) {
             errmsg = str::stream() << "could not acquire collection lock for " << nss.toString()
-                                   << " to split chunk [" << min << "," << max << ")"
-                                   << causedBy(scopedDistLock.getStatus());
+                                   << " to split chunk [" << redact(min) << "," << redact(max)
+                                   << ") " << redact(scopedDistLock.getStatus());
             warning() << errmsg;
             return false;
         }
@@ -274,8 +275,8 @@ public:
 
         if (!refreshStatus.isOK()) {
             errmsg = str::stream() << "splitChunk cannot split chunk "
-                                   << "[" << min << "," << max << ")"
-                                   << causedBy(refreshStatus.reason());
+                                   << "[" << redact(min) << "," << redact(max) << ") "
+                                   << redact(refreshStatus);
 
             warning() << errmsg;
             return false;
@@ -284,7 +285,7 @@ public:
         if (shardVersion.majorVersion() == 0) {
             // It makes no sense to split if our version is zero and we have no chunks
             errmsg = str::stream() << "splitChunk cannot split chunk "
-                                   << "[" << min << "," << max << ")"
+                                   << "[" << redact(min) << "," << redact(max) << ") "
                                    << " with zero shard version";
 
             warning() << errmsg;
@@ -300,7 +301,7 @@ public:
         ChunkVersion expectedCollectionVersion = oss.getShardVersion(nss);
         if (expectedCollectionVersion.epoch() != shardVersion.epoch()) {
             std::string msg = str::stream() << "splitChunk cannot split chunk "
-                                            << "[" << min << "," << max << "), "
+                                            << "[" << redact(min) << "," << redact(max) << "), "
                                             << "collection may have been dropped. "
                                             << "current epoch: " << shardVersion.epoch()
                                            << ", cmd epoch: " << expectedCollectionVersion.epoch();
@@ -329,7 +330,7 @@ public:
             origChunk.getMax().woCompare(max)) {
             // Our boundaries are different from those passed in
             std::string msg = str::stream() << "splitChunk cannot find chunk "
-                                            << "[" << min << "," << max << ")"
+                                            << "[" << redact(min) << "," << redact(max) << ") "
                                             << " to split, the chunk boundaries may be stale";
 
             warning() << msg;
             throw SendStaleConfigException(
@@ -368,8 +369,8 @@ public:
         refreshStatus = shardingState->refreshMetadataNow(txn, nss.ns(), &shardVersionAfterSplit);
 
         if (!refreshStatus.isOK()) {
-            errmsg = str::stream() << "failed to refresh metadata for split chunk [" << min << ","
-                                   << max << ")" << causedBy(refreshStatus.reason());
+            errmsg = str::stream() << "failed to refresh metadata for split chunk [" << redact(min)
+                                   << "," << redact(max) << ") " << redact(refreshStatus);
 
             warning() << errmsg;
             return false;
@@ -385,7 +386,7 @@ public:
         // Send stale epoch if epoch of request did not match epoch of collection
         if (commandStatus == ErrorCodes::StaleEpoch) {
             std::string msg = str::stream() << "splitChunk cannot split chunk "
-                                            << "[" << min << "," << max << "), "
+                                            << "[" << redact(min) << "," << redact(max) << "), "
                                             << "collection may have been dropped. "
                                             << "current epoch: " << collVersion.epoch()
                                            << ", cmd epoch: " << expectedCollectionVersion.epoch();
@@ -406,7 +407,8 @@ public:
 
         if ((!commandStatus.isOK() || !writeConcernStatus.isOK()) &&
             checkMetadataForSuccess(txn, nss, chunkRange, splitKeys)) {
-            LOG(1) << "splitChunk [" << min << "," << max << ") has already been committed.";
+            LOG(1) << "splitChunk [" << redact(min) << "," << redact(max)
+                   << ") has already been committed.";
         } else if (!commandStatus.isOK()) {
             return appendCommandStatus(result, commandStatus);
         } else if (!writeConcernStatus.isOK()) {
diff --git a/src/mongo/db/s/split_vector_command.cpp b/src/mongo/db/s/split_vector_command.cpp
index 9e2ad5fde75..77503dda5f5 100644
--- a/src/mongo/db/s/split_vector_command.cpp
+++ b/src/mongo/db/s/split_vector_command.cpp
@@ -229,8 +229,8 @@ public:
                 return true;
             }
 
-            log() << "request split points lookup for chunk " << nss.toString() << " " << min
-                  << " -->> " << max;
+            log() << "request split points lookup for chunk " << nss.toString() << " "
+                  << redact(min) << " -->> " << redact(max);
 
             // We'll use the average object size and number of object to find approximately how many
            // keys each chunk should have. We'll split at half the maxChunkSize or maxChunkObjects,
@@ -292,15 +292,15 @@ public:
                             splitKeys.push_back(currKey.getOwned());
                             currCount = 0;
                             numChunks++;
-                            LOG(4) << "picked a split key: " << currKey;
+                            LOG(4) << "picked a split key: " << redact(currKey);
                         }
                     }
 
                     // Stop if we have enough split points.
                     if (maxSplitPoints && (numChunks >= maxSplitPoints)) {
                         log() << "max number of requested split points reached (" << numChunks
-                              << ") before the end of chunk " << nss.toString() << " " << min
-                              << " -->> " << max;
+                              << ") before the end of chunk " << nss.toString() << " "
+                              << redact(min) << " -->> " << redact(max);
                         break;
                     }
 
@@ -360,7 +360,7 @@ public:
 
         if (timer.millis() > serverGlobalParams.slowMS) {
             warning() << "Finding the split vector for " << nss.toString() << " over "
-                      << keyPattern << " keyCount: " << keyCount
+                      << redact(keyPattern) << " keyCount: " << keyCount
                       << " numSplits: " << splitKeys.size() << " lookedAt: " << currCount
                       << " took " << timer.millis() << "ms";
         }
diff --git a/src/mongo/db/stats/snapshots.cpp b/src/mongo/db/stats/snapshots.cpp
index 8acf20b7e7a..a9d31319d2b 100644
--- a/src/mongo/db/stats/snapshots.cpp
+++ b/src/mongo/db/stats/snapshots.cpp
@@ -109,7 +109,7 @@ void StatsSnapshotThread::run() {
         try {
             statsSnapshots.takeSnapshot();
         } catch (std::exception& e) {
-            log() << "ERROR in SnapshotThread: " << e.what() << endl;
+            log() << "ERROR in SnapshotThread: " << redact(e.what()) << endl;
         }
 
         sleepsecs(4);
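Several hunks above also replace causedBy(status.reason()) with redact(status), and one added comment leaves an open question about how to handle causedBy. The standalone sketch below shows that substitution in isolation, assuming a simplified Status struct and a hypothetical redact() overload rather than the server's real types: the error code stays visible while the free-form reason, which is where keys and documents can leak into the message, is hidden.

    #include <iostream>
    #include <string>

    // Simplified stand-ins for the server's Status type and redaction switch.
    struct Status {
        int code;
        std::string reason;
    };

    static bool logRedactionEnabled = true;

    // Hypothetical overload: keep the numeric error code, redact the reason text.
    std::string redact(const Status& status) {
        return "Status(code " + std::to_string(status.code) + ", reason: " +
            (logRedactionEnabled ? std::string("###") : status.reason) + ")";
    }

    int main() {
        Status refreshStatus{13388, "stale config for chunk { shardKey: 42 }"};
        // Instead of appending causedBy(refreshStatus.reason), stream the redacted status.
        std::cerr << "failed to refresh shard metadata: " << redact(refreshStatus) << std::endl;
        return 0;
    }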