author    Kevin Pulo <kevin.pulo@mongodb.com>    2018-11-01 21:17:00 +0000
committer Kevin Pulo <kevin.pulo@mongodb.com>    2018-11-14 01:38:39 +0000
commit    c2cc425b9d2b23eead06ecbfd996375e47c81baa (patch)
tree      ca24166f5800b1cbee8254148b26c41a9c9f0195
parent    20ca44d3ab1dce18c71ff726fcab3591b809137a (diff)
download  mongo-c2cc425b9d2b23eead06ecbfd996375e47c81baa.tar.gz
SERVER-36411 include shard id/name in changelog/actionlog entries
-rw-r--r--  src/mongo/db/s/SConscript                                                |  26
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp                                     |   5
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp                |  15
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp      |   7
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp |  39
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp      |   7
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp                              |   9
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp                           |   9
-rw-r--r--  src/mongo/db/s/move_timing_helper.cpp                                    |  11
-rw-r--r--  src/mongo/db/s/sharding_logging.cpp                                      | 197
-rw-r--r--  src/mongo/db/s/sharding_logging.h                                        | 110
-rw-r--r--  src/mongo/db/s/sharding_logging_test.cpp (renamed from src/mongo/s/catalog/sharding_catalog_log_change_test.cpp) |  13
-rw-r--r--  src/mongo/db/s/sharding_state_recovery.cpp                               |  13
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp                             |  25
-rw-r--r--  src/mongo/s/catalog/SConscript                                           |   1
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h                            |  27
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp                     | 125
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.h                       |  45
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp                     |  15
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h                       |  11
-rw-r--r--  src/mongo/s/catalog/type_changelog.cpp                                   |  16
-rw-r--r--  src/mongo/s/catalog/type_changelog.h                                     |  10
-rw-r--r--  src/mongo/s/catalog/type_changelog_test.cpp                              |  60
23 files changed, 502 insertions(+), 294 deletions(-)
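The bulk of the diff below moves changelog/actionlog writes off ShardingCatalogClient and onto the new ShardingLogging service-context decoration, which also records the shard id. A minimal sketch of the call-site change, based on the hunks below (the function name logSplitExample and its arguments are illustrative, not part of the commit):

#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/sharding_logging.h"
#include "mongo/s/catalog/sharding_catalog_client.h"

namespace mongo {

// Before this commit, changelog writes went through the catalog client:
//   Grid::get(opCtx)->catalogClient()->logChange(
//       opCtx, "split", nss.ns(), detail, ShardingCatalogClient::kMajorityWriteConcern);
//
// After this commit, call sites resolve the ShardingLogging decoration from the
// operation context; ShardingLogging::_log() then fills in the new "shard" field
// (the shard's id, or "config" on config servers) before inserting the document.
void logSplitExample(OperationContext* opCtx,
                     const NamespaceString& nss,
                     const BSONObj& detail) {
    ShardingLogging::get(opCtx)->logChange(
        opCtx, "split", nss.ns(), detail, ShardingCatalogClient::kMajorityWriteConcern);
}

}  // namespace mongo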
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index b91d82a2691..2e91314615d 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -82,6 +82,7 @@ env.Library(
'migration_types',
'sharding_api_d',
'sharding_catalog_manager',
+ 'sharding_logging',
],
)
@@ -189,6 +190,7 @@ env.Library(
'$BUILD_DIR/mongo/s/catalog/dist_lock_manager',
'$BUILD_DIR/mongo/s/client/sharding_client',
'$BUILD_DIR/mongo/s/coreshard',
+ 'sharding_logging',
],
)
@@ -426,3 +428,27 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/util/version_impl',
]
)
+
+env.Library(
+ target='sharding_logging',
+ source=[
+ 'sharding_logging.cpp',
+ ],
+ LIBDEPS=[
+ 'sharding_api_d',
+ '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl',
+ ],
+)
+
+env.CppUnitTest(
+ target='sharding_logging_test',
+ source=[
+ 'sharding_logging_test.cpp',
+ ],
+ LIBDEPS=[
+ 'sharding_runtime_d',
+ 'sharding_logging',
+ '$BUILD_DIR/mongo/db/auth/authmocks',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
+ ]
+)
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 7807503b473..f5570a52507 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h"
#include "mongo/db/s/balancer/cluster_statistics_impl.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog_cache.h"
@@ -381,7 +382,7 @@ void Balancer::_mainThread() {
roundDetails.setSucceeded(static_cast<int>(candidateChunks.size()),
_balancedLastTime);
- shardingContext->catalogClient()
+ ShardingLogging::get(opCtx.get())
->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON())
.transitional_ignore();
}
@@ -401,7 +402,7 @@ void Balancer::_mainThread() {
// This round failed, tell the world!
roundDetails.setFailed(e.what());
- shardingContext->catalogClient()
+ ShardingLogging::get(opCtx.get())
->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON())
.transitional_ignore();
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index c775ad19e04..c9d49c974b9 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
@@ -137,12 +138,12 @@ public:
// error.
auto dbType = uassertStatusOK(dbInfo).value;
- uassertStatusOK(
- catalogClient->logChangeChecked(opCtx,
- "dropDatabase.start",
- dbname,
- BSONObj(),
- ShardingCatalogClient::kMajorityWriteConcern));
+ uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
+ opCtx,
+ "dropDatabase.start",
+ dbname,
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern));
// Drop the database's collections.
for (const auto& nss : catalogClient->getAllShardedCollectionsForDb(
@@ -174,7 +175,7 @@ public:
uassertStatusOKWithContext(
status, str::stream() << "Could not remove database '" << dbname << "' from metadata");
- catalogClient->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
result.append("dropped", dbname);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 040d4cc03d0..a8635e61bb2 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -456,7 +457,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
appendShortVersion(&logDetail.subobjStart("left"), newChunks[0]);
appendShortVersion(&logDetail.subobjStart("right"), newChunks[1]);
- Grid::get(opCtx)->catalogClient()->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx, "split", nss.ns(), logDetail.obj(), WriteConcernOptions());
} else {
BSONObj beforeDetailObj = logDetail.obj();
@@ -470,7 +471,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
chunkDetail.append("of", newChunksSize);
appendShortVersion(&chunkDetail.subobjStart("chunk"), newChunks[i]);
- Grid::get(opCtx)->catalogClient()->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx, "multi-split", nss.ns(), chunkDetail.obj(), WriteConcernOptions());
}
}
@@ -583,7 +584,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
collVersion.appendLegacyWithField(&logDetail, "prevShardVersion");
mergeVersion.appendLegacyWithField(&logDetail, "mergedVersion");
- Grid::get(opCtx)->catalogClient()->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx, "merge", nss.ns(), logDetail.obj(), WriteConcernOptions());
return Status::OK();
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 3d0eaddfa1d..b280c157b74 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/config/initial_split_policy.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/executor/network_interface.h"
#include "mongo/executor/task_executor.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -166,17 +167,17 @@ void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss)
}
Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
const Status logStatus =
- catalogClient->logChangeChecked(opCtx,
- "dropCollection.start",
- nss.ns(),
- BSONObj(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ ShardingLogging::get(opCtx)->logChangeChecked(opCtx,
+ "dropCollection.start",
+ nss.ns(),
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern);
if (!logStatus.isOK()) {
return logStatus;
}
+ const auto catalogClient = Grid::get(opCtx)->catalogClient();
const auto shardsStatus =
catalogClient->getAllShards(opCtx, repl::ReadConcernLevel::kLocalReadConcern);
if (!shardsStatus.isOK()) {
@@ -350,7 +351,7 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
LOG(1) << "dropCollection " << nss.ns() << " completed";
- catalogClient->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx, "dropCollection", nss.ns(), BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
return Status::OK();
@@ -365,7 +366,6 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
const vector<BSONObj>& splitPoints,
const bool distributeInitialChunks,
const ShardId& dbPrimaryShardId) {
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto primaryShard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
@@ -383,12 +383,12 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
}
collectionDetail.append("primary", primaryShard->toString());
collectionDetail.append("numChunks", static_cast<int>(splitPoints.size() + 1));
- uassertStatusOK(
- catalogClient->logChangeChecked(opCtx,
- "shardCollection.start",
- nss.ns(),
- collectionDetail.obj(),
- ShardingCatalogClient::kMajorityWriteConcern));
+ uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
+ opCtx,
+ "shardCollection.start",
+ nss.ns(),
+ collectionDetail.obj(),
+ ShardingCatalogClient::kMajorityWriteConcern));
}
// Construct the collection default collator.
@@ -445,11 +445,12 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
<< dbPrimaryShardId << causedBy(redact(status));
}
- catalogClient->logChange(opCtx,
- "shardCollection.end",
- nss.ns(),
- BSON("version" << initialChunks.collVersion().toString()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ ShardingLogging::get(opCtx)->logChange(
+ opCtx,
+ "shardCollection.end",
+ nss.ns(),
+ BSON("version" << initialChunks.collVersion().toString()),
+ ShardingCatalogClient::kMajorityWriteConcern);
}
void ShardingCatalogManager::generateUUIDsForExistingShardedCollections(OperationContext* opCtx) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index ac56b8e9d14..76b1ba10087 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -57,6 +57,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/add_shard_cmd_gen.h"
#include "mongo/db/s/add_shard_util.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/wire_version.h"
#include "mongo/executor/task_executor.h"
@@ -759,7 +760,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
shardDetails.append("name", shardType.getName());
shardDetails.append("host", shardConnectionString.toString());
- Grid::get(opCtx)->catalogClient()->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern);
// Ensure the added shard is visible to this process.
@@ -812,7 +813,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogManager::removeShard(OperationCon
log() << "going to start draining shard: " << name;
// Record start in changelog
- const Status logStatus = Grid::get(opCtx)->catalogClient()->logChangeChecked(
+ const Status logStatus = ShardingLogging::get(opCtx)->logChangeChecked(
opCtx,
"removeShard.start",
"",
@@ -884,7 +885,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogManager::removeShard(OperationCon
shardRegistry->reload(opCtx);
// Record finish in changelog
- Grid::get(opCtx)->catalogClient()->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx, "removeShard", "", BSON("shard" << name), ShardingCatalogClient::kLocalWriteConcern);
return ShardDrainingStatus::COMPLETED;
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 799f062f219..d30c52ac2b2 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/s/migration_util.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/shard_metadata_util.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/sharding_state_recovery.h"
#include "mongo/db/s/sharding_statistics.h"
@@ -225,7 +226,7 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
_stats.countDonorMoveChunkStarted.addAndFetch(1);
- const Status logStatus = Grid::get(opCtx)->catalogClient()->logChangeChecked(
+ const Status logStatus = ShardingLogging::get(opCtx)->logChangeChecked(
opCtx,
"moveChunk.start",
getNss().ns(),
@@ -446,7 +447,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
"against the config server to obtain its latest optime"
<< causedBy(redact(migrationCommitStatus));
- Status status = Grid::get(opCtx)->catalogClient()->logChangeChecked(
+ Status status = ShardingLogging::get(opCtx)->logChangeChecked(
opCtx,
"moveChunk.validating",
getNss().ns(),
@@ -574,7 +575,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
// scheduling orphan cleanup.
_cleanup(opCtx);
- Grid::get(opCtx)->catalogClient()->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx,
"moveChunk.commit",
getNss().ns(),
@@ -637,7 +638,7 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
return;
}
- Grid::get(opCtx)->catalogClient()->logChange(
+ ShardingLogging::get(opCtx)->logChange(
opCtx,
"moveChunk.error",
getNss().ns(),
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index ef71c904fc4..90f1cc50cce 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/s/shard_metadata_util.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/db/s/sharding_state_recovery.h"
#include "mongo/db/s/sharding_statistics.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -77,7 +78,7 @@ Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
log() << "Moving " << _dbname << " primary from: " << _fromShard << " to: " << _toShard;
// Record start in changelog
- uassertStatusOK(Grid::get(opCtx)->catalogClient()->logChangeChecked(
+ uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
opCtx,
"movePrimary.start",
_dbname.toString(),
@@ -225,7 +226,7 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
"against the config server to obtain its latest optime"
<< causedBy(redact(commitStatus));
- Status validateStatus = Grid::get(opCtx)->catalogClient()->logChangeChecked(
+ Status validateStatus = ShardingLogging::get(opCtx)->logChangeChecked(
opCtx,
"movePrimary.validating",
getNss().ns(),
@@ -287,7 +288,7 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
_cleanup(opCtx);
- uassertStatusOK(Grid::get(opCtx)->catalogClient()->logChangeChecked(
+ uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
opCtx,
"movePrimary.commit",
_dbname.toString(),
@@ -327,7 +328,7 @@ void MovePrimarySourceManager::cleanupOnError(OperationContext* opCtx) {
}
try {
- uassertStatusOK(Grid::get(opCtx)->catalogClient()->logChangeChecked(
+ uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
opCtx,
"movePrimary.error",
_dbname.toString(),
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 5a588c62c50..37bfa6247d3 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/s/grid.h"
#include "mongo/util/log.h"
@@ -84,11 +85,11 @@ MoveTimingHelper::~MoveTimingHelper() {
_b.append("errmsg", *_cmdErrmsg);
}
- Grid::get(_opCtx)->catalogClient()->logChange(_opCtx,
- str::stream() << "moveChunk." << _where,
- _ns,
- _b.obj(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ ShardingLogging::get(_opCtx)->logChange(_opCtx,
+ str::stream() << "moveChunk." << _where,
+ _ns,
+ _b.obj(),
+ ShardingCatalogClient::kMajorityWriteConcern);
} catch (const std::exception& e) {
warning() << "couldn't record timing for moveChunk '" << _where
<< "': " << redact(e.what());
diff --git a/src/mongo/db/s/sharding_logging.cpp b/src/mongo/db/s/sharding_logging.cpp
new file mode 100644
index 00000000000..3529a42cfbd
--- /dev/null
+++ b/src/mongo/db/s/sharding_logging.cpp
@@ -0,0 +1,197 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/s/sharding_logging.h"
+
+#include "mongo/bson/bsonobj.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/server_options.h"
+#include "mongo/executor/network_interface.h"
+#include "mongo/s/catalog/type_changelog.h"
+#include "mongo/s/grid.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+
+namespace {
+
+const std::string kActionLogCollectionName("actionlog");
+const int kActionLogCollectionSizeMB = 20 * 1024 * 1024;
+
+const std::string kChangeLogCollectionName("changelog");
+const int kChangeLogCollectionSizeMB = 200 * 1024 * 1024;
+
+// Global ShardingLogging instance
+const auto shardingLogging = ServiceContext::declareDecoration<ShardingLogging>();
+
+} // namespace
+
+ShardingLogging::ShardingLogging() = default;
+
+ShardingLogging::~ShardingLogging() = default;
+
+ShardingLogging* ShardingLogging::get(ServiceContext* serviceContext) {
+ return &shardingLogging(serviceContext);
+}
+
+ShardingLogging* ShardingLogging::get(OperationContext* operationContext) {
+ return get(operationContext->getServiceContext());
+}
+
+Status ShardingLogging::logAction(OperationContext* opCtx,
+ const StringData what,
+ const StringData ns,
+ const BSONObj& detail) {
+ if (_actionLogCollectionCreated.load() == 0) {
+ Status result = _createCappedConfigCollection(opCtx,
+ kActionLogCollectionName,
+ kActionLogCollectionSizeMB,
+ ShardingCatalogClient::kMajorityWriteConcern);
+ if (result.isOK()) {
+ _actionLogCollectionCreated.store(1);
+ } else {
+ log() << "couldn't create config.actionlog collection:" << causedBy(result);
+ return result;
+ }
+ }
+
+ return _log(opCtx,
+ kActionLogCollectionName,
+ what,
+ ns,
+ detail,
+ ShardingCatalogClient::kMajorityWriteConcern);
+}
+
+Status ShardingLogging::logChangeChecked(OperationContext* opCtx,
+ const StringData what,
+ const StringData ns,
+ const BSONObj& detail,
+ const WriteConcernOptions& writeConcern) {
+ invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
+ writeConcern.wMode == WriteConcernOptions::kMajority);
+ if (_changeLogCollectionCreated.load() == 0) {
+ Status result = _createCappedConfigCollection(
+ opCtx, kChangeLogCollectionName, kChangeLogCollectionSizeMB, writeConcern);
+ if (result.isOK()) {
+ _changeLogCollectionCreated.store(1);
+ } else {
+ log() << "couldn't create config.changelog collection:" << causedBy(result);
+ return result;
+ }
+ }
+
+ return _log(opCtx, kChangeLogCollectionName, what, ns, detail, writeConcern);
+}
+
+Status ShardingLogging::_log(OperationContext* opCtx,
+ const StringData logCollName,
+ const StringData what,
+ const StringData operationNS,
+ const BSONObj& detail,
+ const WriteConcernOptions& writeConcern) {
+ Date_t now = Grid::get(opCtx)->getNetwork()->now();
+ const std::string serverName = str::stream() << Grid::get(opCtx)->getNetwork()->getHostName()
+ << ":" << serverGlobalParams.port;
+ const std::string changeId = str::stream() << serverName << "-" << now.toString() << "-"
+ << OID::gen();
+
+ ChangeLogType changeLog;
+ changeLog.setChangeId(changeId);
+ changeLog.setServer(serverName);
+ if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+ changeLog.setShard("config");
+ } else {
+ auto shardingState = ShardingState::get(opCtx);
+ if (shardingState->enabled()) {
+ changeLog.setShard(shardingState->shardId().toString());
+ }
+ }
+ changeLog.setClientAddr(opCtx->getClient()->clientAddress(true));
+ changeLog.setTime(now);
+ changeLog.setNS(operationNS.toString());
+ changeLog.setWhat(what.toString());
+ changeLog.setDetails(detail);
+
+ BSONObj changeLogBSON = changeLog.toBSON();
+ log() << "about to log metadata event into " << logCollName << ": " << redact(changeLogBSON);
+
+ const NamespaceString nss("config", logCollName);
+ Status result = Grid::get(opCtx)->catalogClient()->insertConfigDocument(
+ opCtx, nss, changeLogBSON, writeConcern);
+
+ if (!result.isOK()) {
+ warning() << "Error encountered while logging config change with ID [" << changeId
+ << "] into collection " << logCollName << ": " << redact(result);
+ }
+
+ return result;
+}
+
+Status ShardingLogging::_createCappedConfigCollection(OperationContext* opCtx,
+ StringData collName,
+ int cappedSize,
+ const WriteConcernOptions& writeConcern) {
+ BSONObj createCmd = BSON("create" << collName << "capped" << true << "size" << cappedSize
+ << WriteConcernOptions::kWriteConcernField
+ << writeConcern.toBSON());
+
+ auto result =
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "config",
+ createCmd,
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kIdempotent);
+
+ if (!result.isOK()) {
+ return result.getStatus();
+ }
+
+ if (!result.getValue().commandStatus.isOK()) {
+ if (result.getValue().commandStatus == ErrorCodes::NamespaceExists) {
+ if (result.getValue().writeConcernStatus.isOK()) {
+ return Status::OK();
+ } else {
+ return result.getValue().writeConcernStatus;
+ }
+ } else {
+ return result.getValue().commandStatus;
+ }
+ }
+
+ return result.getValue().writeConcernStatus;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/s/sharding_logging.h b/src/mongo/db/s/sharding_logging.h
new file mode 100644
index 00000000000..3407809d9a4
--- /dev/null
+++ b/src/mongo/db/s/sharding_logging.h
@@ -0,0 +1,110 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/operation_context.h"
+
+namespace mongo {
+
+/**
+ * Diagnostic logging of sharding metadata events (changelog and actionlog).
+ */
+class ShardingLogging {
+
+public:
+ ShardingLogging();
+ ~ShardingLogging();
+
+ /**
+ * Retrieves the ShardingLogging instance associated with the current service/operation context.
+ */
+ static ShardingLogging* get(ServiceContext* serviceContext);
+ static ShardingLogging* get(OperationContext* operationContext);
+
+ Status logAction(OperationContext* opCtx,
+ const StringData what,
+ const StringData ns,
+ const BSONObj& detail);
+
+ Status logChangeChecked(OperationContext* opCtx,
+ const StringData what,
+ const StringData ns,
+ const BSONObj& detail,
+ const WriteConcernOptions& writeConcern);
+
+ void logChange(OperationContext* const opCtx,
+ const StringData what,
+ const StringData ns,
+ const BSONObj& detail,
+ const WriteConcernOptions& writeConcern) {
+ // It is safe to ignore the results of `logChangeChecked` in many cases, as the
+ // failure to log a change is often of no consequence.
+ logChangeChecked(opCtx, what, ns, detail, writeConcern).ignore();
+ }
+
+private:
+ /**
+ * Creates the specified collection name in the config database.
+ */
+ Status _createCappedConfigCollection(OperationContext* opCtx,
+ StringData collName,
+ int cappedSize,
+ const WriteConcernOptions& writeConcern);
+
+ /**
+ * Best effort method, which logs diagnostic events on the config server. If the config server
+ * write fails for any reason a warning will be written to the local service log and the method
+ * will return a failed status.
+ *
+ * @param opCtx Operation context in which the call is running
+ * @param logCollName Which config collection to write to (excluding the database name)
+ * @param what E.g. "split", "migrate" (not interpreted)
+ * @param operationNS To which collection the metadata change is being applied (not interpreted)
+ * @param detail Additional info about the metadata change (not interpreted)
+ * @param writeConcern Write concern options to use for logging
+ */
+ Status _log(OperationContext* opCtx,
+ const StringData logCollName,
+ const StringData what,
+ const StringData operationNSS,
+ const BSONObj& detail,
+ const WriteConcernOptions& writeConcern);
+
+ // Member variable properties:
+ // (S) Self-synchronizing; access in any way from any context.
+
+ // Whether the logAction call should attempt to create the actionlog collection
+ AtomicInt32 _actionLogCollectionCreated{0}; // (S)
+
+ // Whether the logChange call should attempt to create the changelog collection
+ AtomicInt32 _changeLogCollectionCreated{0}; // (S)
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp b/src/mongo/db/s/sharding_logging_test.cpp
index f7d2688a368..5504d76c0d9 100644
--- a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
+++ b/src/mongo/db/s/sharding_logging_test.cpp
@@ -1,4 +1,3 @@
-
/**
* Copyright (C) 2018-present MongoDB, Inc.
*
@@ -36,6 +35,7 @@
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
@@ -180,10 +180,15 @@ protected:
Status log(const std::string& what, const std::string& ns, const BSONObj& detail) {
if (_configCollType == ChangeLog) {
- return catalogClient()->logChangeChecked(
- operationContext(), what, ns, detail, ShardingCatalogClient::kMajorityWriteConcern);
+ return ShardingLogging::get(operationContext())
+ ->logChangeChecked(operationContext(),
+ what,
+ ns,
+ detail,
+ ShardingCatalogClient::kMajorityWriteConcern);
} else {
- return catalogClient()->logAction(operationContext(), what, ns, detail);
+ return ShardingLogging::get(operationContext())
+ ->logAction(operationContext(), what, ns, detail);
}
}
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 6fc39417200..1ba71a79f59 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/repl/bson_extract_optime.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/write_concern.h"
@@ -253,12 +254,12 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {
"to retrieve the most recent opTime.";
// Need to fetch the latest uptime from the config server, so do a logging write
- Status status =
- grid->catalogClient()->logChangeChecked(opCtx,
- "Sharding minOpTime recovery",
- NamespaceString::kServerConfigurationNamespace.ns(),
- recoveryDocBSON,
- ShardingCatalogClient::kMajorityWriteConcern);
+ Status status = ShardingLogging::get(opCtx)->logChangeChecked(
+ opCtx,
+ "Sharding minOpTime recovery",
+ NamespaceString::kServerConfigurationNamespace.ns(),
+ recoveryDocBSON,
+ ShardingCatalogClient::kMajorityWriteConcern);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index 56e9d516385..c948e0b4eff 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -49,6 +49,7 @@
#include "mongo/db/s/config/initial_split_policy.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
+#include "mongo/db/s/sharding_logging.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/balancer_configuration.h"
@@ -408,7 +409,6 @@ void shardCollection(OperationContext* opCtx,
const bool fromMapReduce,
const ShardId& dbPrimaryShardId,
const int numContiguousChunksPerShard) {
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto primaryShard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
@@ -428,12 +428,12 @@ void shardCollection(OperationContext* opCtx,
}
collectionDetail.append("primary", primaryShard->toString());
collectionDetail.append("numChunks", static_cast<int>(splitPoints.size() + 1));
- uassertStatusOK(
- catalogClient->logChangeChecked(opCtx,
- "shardCollection.start",
- nss.ns(),
- collectionDetail.obj(),
- ShardingCatalogClient::kMajorityWriteConcern));
+ uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
+ opCtx,
+ "shardCollection.start",
+ nss.ns(),
+ collectionDetail.obj(),
+ ShardingCatalogClient::kMajorityWriteConcern));
}
// Construct the collection default collator.
@@ -538,11 +538,12 @@ void shardCollection(OperationContext* opCtx,
shardsRefreshed.emplace_back(chunk.getShard());
}
- catalogClient->logChange(opCtx,
- "shardCollection.end",
- nss.ns(),
- BSON("version" << initialChunks.collVersion().toString()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ ShardingLogging::get(opCtx)->logChange(
+ opCtx,
+ "shardCollection.end",
+ nss.ns(),
+ BSON("version" << initialChunks.collVersion().toString()),
+ ShardingCatalogClient::kMajorityWriteConcern);
}
/**
diff --git a/src/mongo/s/catalog/SConscript b/src/mongo/s/catalog/SConscript
index 88d3a3eb090..f37bf3fefc6 100644
--- a/src/mongo/s/catalog/SConscript
+++ b/src/mongo/s/catalog/SConscript
@@ -138,7 +138,6 @@ env.CppUnitTest(
env.CppUnitTest(
target='sharding_catalog_client_test',
source=[
- 'sharding_catalog_log_change_test.cpp',
'sharding_catalog_test.cpp',
'sharding_catalog_write_retry_test.cpp',
],
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index aa68fcc0ad1..219a623939e 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -270,33 +270,6 @@ public:
repl::ReadConcernLevel readConcern) = 0;
/**
- * Writes a diagnostic event to the action log.
- */
- virtual Status logAction(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail) = 0;
-
- /**
- * Writes a diagnostic event to the change log.
- */
- virtual Status logChangeChecked(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail,
- const WriteConcernOptions& writeConcern) = 0;
-
- void logChange(OperationContext* const opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail,
- const WriteConcernOptions& writeConcern) {
- // It is safe to ignore the results of `logChangeChecked` in many cases, as the
- // failure to log a change is often of no consequence.
- logChangeChecked(opCtx, what, ns, detail, writeConcern).ignore();
- }
-
- /**
* Reads global sharding settings from the config.settings collection. The key parameter is
* used as the _id of the respective setting document.
*
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index e50149d3f15..048656f4426 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -53,7 +53,6 @@
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/s/catalog/config_server_version.h"
#include "mongo/s/catalog/dist_lock_manager.h"
-#include "mongo/s/catalog/type_changelog.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_config_version.h"
@@ -95,12 +94,6 @@ const ReadPreferenceSetting kConfigPrimaryPreferredSelector(ReadPreference::Prim
const int kMaxReadRetry = 3;
const int kMaxWriteRetry = 3;
-const std::string kActionLogCollectionName("actionlog");
-const int kActionLogCollectionSizeMB = 20 * 1024 * 1024;
-
-const std::string kChangeLogCollectionName("changelog");
-const int kChangeLogCollectionSizeMB = 200 * 1024 * 1024;
-
const NamespaceString kSettingsNamespace("config", "settings");
void toBatchError(const Status& status, BatchedCommandResponse* response) {
@@ -153,87 +146,6 @@ Status ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
return status.getStatus().withContext(str::stream() << "Collection metadata write failed");
}
-Status ShardingCatalogClientImpl::logAction(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail) {
- if (_actionLogCollectionCreated.load() == 0) {
- Status result = _createCappedConfigCollection(opCtx,
- kActionLogCollectionName,
- kActionLogCollectionSizeMB,
- ShardingCatalogClient::kMajorityWriteConcern);
- if (result.isOK()) {
- _actionLogCollectionCreated.store(1);
- } else {
- log() << "couldn't create config.actionlog collection:" << causedBy(result);
- return result;
- }
- }
-
- return _log(opCtx,
- kActionLogCollectionName,
- what,
- ns,
- detail,
- ShardingCatalogClient::kMajorityWriteConcern);
-}
-
-Status ShardingCatalogClientImpl::logChangeChecked(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail,
- const WriteConcernOptions& writeConcern) {
- invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
- writeConcern.wMode == WriteConcernOptions::kMajority);
- if (_changeLogCollectionCreated.load() == 0) {
- Status result = _createCappedConfigCollection(
- opCtx, kChangeLogCollectionName, kChangeLogCollectionSizeMB, writeConcern);
- if (result.isOK()) {
- _changeLogCollectionCreated.store(1);
- } else {
- log() << "couldn't create config.changelog collection:" << causedBy(result);
- return result;
- }
- }
-
- return _log(opCtx, kChangeLogCollectionName, what, ns, detail, writeConcern);
-}
-
-Status ShardingCatalogClientImpl::_log(OperationContext* opCtx,
- const StringData& logCollName,
- const std::string& what,
- const std::string& operationNS,
- const BSONObj& detail,
- const WriteConcernOptions& writeConcern) {
- Date_t now = Grid::get(opCtx)->getNetwork()->now();
- const std::string serverName = str::stream() << Grid::get(opCtx)->getNetwork()->getHostName()
- << ":" << serverGlobalParams.port;
- const std::string changeId = str::stream() << serverName << "-" << now.toString() << "-"
- << OID::gen();
-
- ChangeLogType changeLog;
- changeLog.setChangeId(changeId);
- changeLog.setServer(serverName);
- changeLog.setClientAddr(opCtx->getClient()->clientAddress(true));
- changeLog.setTime(now);
- changeLog.setNS(operationNS);
- changeLog.setWhat(what);
- changeLog.setDetails(detail);
-
- BSONObj changeLogBSON = changeLog.toBSON();
- log() << "about to log metadata event into " << logCollName << ": " << redact(changeLogBSON);
-
- const NamespaceString nss("config", logCollName);
- Status result = insertConfigDocument(opCtx, nss, changeLogBSON, writeConcern);
-
- if (!result.isOK()) {
- warning() << "Error encountered while logging config change with ID [" << changeId
- << "] into collection " << logCollName << ": " << redact(result);
- }
-
- return result;
-}
-
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabase(
OperationContext* opCtx, const std::string& dbName, repl::ReadConcernLevel readConcernLevel) {
if (!NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)) {
@@ -977,43 +889,6 @@ Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* opCtx,
return response.toStatus();
}
-Status ShardingCatalogClientImpl::_createCappedConfigCollection(
- OperationContext* opCtx,
- StringData collName,
- int cappedSize,
- const WriteConcernOptions& writeConcern) {
- BSONObj createCmd = BSON("create" << collName << "capped" << true << "size" << cappedSize
- << WriteConcernOptions::kWriteConcernField
- << writeConcern.toBSON());
-
- auto result =
- Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "config",
- createCmd,
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent);
-
- if (!result.isOK()) {
- return result.getStatus();
- }
-
- if (!result.getValue().commandStatus.isOK()) {
- if (result.getValue().commandStatus == ErrorCodes::NamespaceExists) {
- if (result.getValue().writeConcernStatus.isOK()) {
- return Status::OK();
- } else {
- return result.getValue().writeConcernStatus;
- }
- } else {
- return result.getValue().commandStatus;
- }
- }
-
- return result.getValue().writeConcernStatus;
-}
-
StatusWith<repl::OpTimeWith<vector<BSONObj>>> ShardingCatalogClientImpl::_exhaustiveFindOnConfig(
OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index c6f48bb8de5..1e60cede5ee 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -73,17 +73,6 @@ public:
void shutDown(OperationContext* opCtx) override;
- Status logAction(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail) override;
-
- Status logChangeChecked(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail,
- const WriteConcernOptions& writeConcern) override;
-
StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(
OperationContext* opCtx,
const std::string& dbName,
@@ -195,14 +184,6 @@ private:
bool upsert,
const WriteConcernOptions& writeConcern);
- /**
- * Creates the specified collection name in the config database.
- */
- Status _createCappedConfigCollection(OperationContext* opCtx,
- StringData collName,
- int cappedSize,
- const WriteConcernOptions& writeConcern);
-
StatusWith<repl::OpTimeWith<std::vector<BSONObj>>> _exhaustiveFindOnConfig(
OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
@@ -222,32 +203,12 @@ private:
const ReadPreferenceSetting& readPref,
repl::ReadConcernLevel readConcernLevel);
- /**
- * Best effort method, which logs diagnostic events on the config server. If the config server
- * write fails for any reason a warning will be written to the local service log and the method
- * will return a failed status.
- *
- * @param opCtx Operation context in which the call is running
- * @param logCollName Which config collection to write to (excluding the database name)
- * @param what E.g. "split", "migrate" (not interpreted)
- * @param operationNS To which collection the metadata change is being applied (not interpreted)
- * @param detail Additional info about the metadata change (not interpreted)
- * @param writeConcern Write concern options to use for logging
- */
- Status _log(OperationContext* opCtx,
- const StringData& logCollName,
- const std::string& what,
- const std::string& operationNSS,
- const BSONObj& detail,
- const WriteConcernOptions& writeConcern);
-
//
// All member variables are labeled with one of the following codes indicating the
// synchronization rules for accessing them.
//
// (M) Must hold _mutex for access.
// (R) Read only, can only be written during initialization.
- // (S) Self-synchronizing; access in any way from any context.
//
stdx::mutex _mutex;
@@ -260,12 +221,6 @@ private:
// True if startup() has been called.
bool _started = false; // (M)
-
- // Whether the logAction call should attempt to create the actionlog collection
- AtomicInt32 _actionLogCollectionCreated{0}; // (S)
-
- // Whether the logChange call should attempt to create the changelog collection
- AtomicInt32 _changeLogCollectionCreated{0}; // (S)
};
} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index 613ff1a5b68..d415641f6c0 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -143,21 +143,6 @@ Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* opCt
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::logAction(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
-Status ShardingCatalogClientMock::logChangeChecked(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail,
- const WriteConcernOptions& writeConcern) {
- return {ErrorCodes::InternalError, "Method not implemented"};
-}
-
StatusWith<BSONObj> ShardingCatalogClientMock::getGlobalSettings(OperationContext* opCtx,
StringData key) {
return {ErrorCodes::InternalError, "Method not implemented"};
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 9301f8dd034..3ab56cd9a6b 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -103,17 +103,6 @@ public:
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) override;
- Status logAction(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail) override;
-
- Status logChangeChecked(OperationContext* opCtx,
- const std::string& what,
- const std::string& ns,
- const BSONObj& detail,
- const WriteConcernOptions& writeConcern) override;
-
StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) override;
StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
diff --git a/src/mongo/s/catalog/type_changelog.cpp b/src/mongo/s/catalog/type_changelog.cpp
index 6d15de7db14..2829027d451 100644
--- a/src/mongo/s/catalog/type_changelog.cpp
+++ b/src/mongo/s/catalog/type_changelog.cpp
@@ -43,6 +43,7 @@ namespace mongo {
const BSONField<std::string> ChangeLogType::changeId("_id");
const BSONField<std::string> ChangeLogType::server("server");
+const BSONField<std::string> ChangeLogType::shard("shard");
const BSONField<std::string> ChangeLogType::clientAddr("clientAddr");
const BSONField<Date_t> ChangeLogType::time("time");
const BSONField<std::string> ChangeLogType::what("what");
@@ -69,6 +70,15 @@ StatusWith<ChangeLogType> ChangeLogType::fromBSON(const BSONObj& source) {
}
{
+ std::string changeLogShard;
+ Status status =
+ bsonExtractStringFieldWithDefault(source, shard.name(), "", &changeLogShard);
+ if (!status.isOK())
+ return status;
+ changeLog._shard = changeLogShard;
+ }
+
+ {
std::string changeLogClientAddr;
Status status = bsonExtractStringField(source, clientAddr.name(), &changeLogClientAddr);
if (!status.isOK())
@@ -142,6 +152,8 @@ BSONObj ChangeLogType::toBSON() const {
builder.append(changeId.name(), getChangeId());
if (_server)
builder.append(server.name(), getServer());
+ if (_shard)
+ builder.append(shard.name(), getShard());
if (_clientAddr)
builder.append(clientAddr.name(), getClientAddr());
if (_time)
@@ -164,6 +176,10 @@ void ChangeLogType::setServer(const std::string& server) {
_server = server;
}
+void ChangeLogType::setShard(const std::string& shard) {
+ _shard = shard;
+}
+
void ChangeLogType::setClientAddr(const std::string& clientAddr) {
_clientAddr = clientAddr;
}
diff --git a/src/mongo/s/catalog/type_changelog.h b/src/mongo/s/catalog/type_changelog.h
index 9f25e57745f..d7b00d2b840 100644
--- a/src/mongo/s/catalog/type_changelog.h
+++ b/src/mongo/s/catalog/type_changelog.h
@@ -48,6 +48,7 @@ public:
// Field names and types in the changelog collection type.
static const BSONField<std::string> changeId;
static const BSONField<std::string> server;
+ static const BSONField<std::string> shard;
static const BSONField<std::string> clientAddr;
static const BSONField<Date_t> time;
static const BSONField<std::string> what;
@@ -86,6 +87,11 @@ public:
}
void setServer(const std::string& server);
+ const std::string& getShard() const {
+ return _shard.get();
+ }
+ void setShard(const std::string& shard);
+
const std::string& getClientAddr() const {
return _clientAddr.get();
}
@@ -116,8 +122,10 @@ private:
// (M) id for this change "<hostname>-<current_time>-<increment>"
boost::optional<std::string> _changeId;
- // (M) hostname of server that we are making the change on. Does not include port.
+ // (M) hostname of server that we are making the change on.
boost::optional<std::string> _server;
+ // (O) id of shard making the change, or "config" for configSvrs
+ boost::optional<std::string> _shard;
// (M) hostname:port of the client that made this change
boost::optional<std::string> _clientAddr;
// (M) time this change was made
diff --git a/src/mongo/s/catalog/type_changelog_test.cpp b/src/mongo/s/catalog/type_changelog_test.cpp
index da5fec257bd..e30c82d0d86 100644
--- a/src/mongo/s/catalog/type_changelog_test.cpp
+++ b/src/mongo/s/catalog/type_changelog_test.cpp
@@ -48,6 +48,7 @@ TEST(ChangeLogType, Empty) {
TEST(ChangeLogType, Valid) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
<< ChangeLogType::server("host.local")
+ << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::what("split")
@@ -62,6 +63,7 @@ TEST(ChangeLogType, Valid) {
ASSERT_EQUALS(logEntry.getChangeId(), "host.local-2012-11-21T19:14:10-8");
ASSERT_EQUALS(logEntry.getServer(), "host.local");
+ ASSERT_EQUALS(logEntry.getShard(), "shardname");
ASSERT_EQUALS(logEntry.getClientAddr(), "192.168.0.189:51128");
ASSERT_EQUALS(logEntry.getTime(), Date_t::fromMillisSinceEpoch(1));
ASSERT_EQUALS(logEntry.getWhat(), "split");
@@ -73,6 +75,7 @@ TEST(ChangeLogType, Valid) {
TEST(ChangeLogType, MissingChangeId) {
BSONObj obj = BSON(ChangeLogType::server("host.local")
+ << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::what("split")
@@ -86,6 +89,7 @@ TEST(ChangeLogType, MissingChangeId) {
TEST(ChangeLogType, MissingServer) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::what("split")
@@ -100,6 +104,7 @@ TEST(ChangeLogType, MissingServer) {
TEST(ChangeLogType, MissingClientAddr) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
<< ChangeLogType::server("host.local")
+ << ChangeLogType::shard("shardname")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::what("split")
<< ChangeLogType::ns("test.test")
@@ -113,6 +118,7 @@ TEST(ChangeLogType, MissingClientAddr) {
TEST(ChangeLogType, MissingTime) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
<< ChangeLogType::server("host.local")
+ << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::what("split")
<< ChangeLogType::ns("test.test")
@@ -126,6 +132,7 @@ TEST(ChangeLogType, MissingTime) {
TEST(ChangeLogType, MissingWhat) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
<< ChangeLogType::server("host.local")
+ << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::ns("test.test")
@@ -136,9 +143,36 @@ TEST(ChangeLogType, MissingWhat) {
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
+TEST(ChangeLogType, MissingNS) {
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::shard("shardname")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
+
+ auto changeLogResult = ChangeLogType::fromBSON(obj);
+ ASSERT_OK(changeLogResult.getStatus());
+ ChangeLogType& logEntry = changeLogResult.getValue();
+ ASSERT_OK(logEntry.validate());
+
+ ASSERT_EQUALS(logEntry.getChangeId(), "host.local-2012-11-21T19:14:10-8");
+ ASSERT_EQUALS(logEntry.getServer(), "host.local");
+ ASSERT_EQUALS(logEntry.getShard(), "shardname");
+ ASSERT_EQUALS(logEntry.getClientAddr(), "192.168.0.189:51128");
+ ASSERT_EQUALS(logEntry.getTime(), Date_t::fromMillisSinceEpoch(1));
+ ASSERT_EQUALS(logEntry.getWhat(), "split");
+ ASSERT_BSONOBJ_EQ(logEntry.getDetails(),
+ BSON("dummy"
+ << "info"));
+}
+
TEST(ChangeLogType, MissingDetails) {
BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
<< ChangeLogType::server("host.local")
+ << ChangeLogType::shard("shardname")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
<< ChangeLogType::what("split")
@@ -148,6 +182,32 @@ TEST(ChangeLogType, MissingDetails) {
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
+TEST(ChangeLogType, MissingShard) {
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
+
+ auto changeLogResult = ChangeLogType::fromBSON(obj);
+ ASSERT_OK(changeLogResult.getStatus());
+ ChangeLogType& logEntry = changeLogResult.getValue();
+ ASSERT_OK(logEntry.validate());
+
+ ASSERT_EQUALS(logEntry.getChangeId(), "host.local-2012-11-21T19:14:10-8");
+ ASSERT_EQUALS(logEntry.getServer(), "host.local");
+ ASSERT_EQUALS(logEntry.getClientAddr(), "192.168.0.189:51128");
+ ASSERT_EQUALS(logEntry.getTime(), Date_t::fromMillisSinceEpoch(1));
+ ASSERT_EQUALS(logEntry.getWhat(), "split");
+ ASSERT_EQUALS(logEntry.getNS(), "test.test");
+ ASSERT_BSONOBJ_EQ(logEntry.getDetails(),
+ BSON("dummy"
+ << "info"));
+}
+
TEST(ChangeLogType, BadType) {
ChangeLogType logEntry;
BSONObj obj = BSON(ChangeLogType::changeId() << 0);
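
For reference, a sketch of a changelog document carrying the new shard field; the values mirror the fixtures in type_changelog_test.cpp above rather than output from a real cluster, and the helper name and includes are illustrative only:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/s/catalog/type_changelog.h"

namespace mongo {

// Builds an example changelog entry. "shardname" is the new field added by this
// commit; config servers write the literal string "config" instead.
BSONObj exampleChangelogEntry() {
    return BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
                << ChangeLogType::server("host.local")
                << ChangeLogType::shard("shardname")
                << ChangeLogType::clientAddr("192.168.0.189:51128")
                << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
                << ChangeLogType::what("split")
                << ChangeLogType::ns("test.test")
                << ChangeLogType::details(BSON("dummy"
                                               << "info")));
}

}  // namespace mongo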