-rw-r--r--  buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml | 3
-rw-r--r--  jstests/core/restart_catalog.js | 107
-rw-r--r--  jstests/core/views/views_all_commands.js | 1
-rw-r--r--  jstests/libs/parallelTester.js | 3
-rw-r--r--  jstests/noPassthrough/restart_catalog_sharded_cluster.js | 203
-rw-r--r--  jstests/sharding/safe_secondary_reads_drop_recreate.js | 1
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js | 1
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js | 1
-rw-r--r--  src/mongo/db/catalog/SConscript | 5
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp | 164
-rw-r--r--  src/mongo/db/catalog/catalog_control.h | 48
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp | 18
-rw-r--r--  src/mongo/db/commands/SConscript | 2
-rw-r--r--  src/mongo/db/commands/restart_catalog_command.cpp | 106
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 6
-rw-r--r--  src/mongo/db/repl/oplog.h | 8
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp | 39
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.h | 4
-rw-r--r--  src/mongo/db/storage/storage_engine.h | 9
-rw-r--r--  src/mongo/s/commands/SConscript | 1
-rw-r--r--  src/mongo/s/commands/cluster_restart_catalog_command.cpp | 106
21 files changed, 818 insertions(+), 18 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml
index c003c2ad3ff..1bbd51c229c 100644
--- a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml
@@ -18,6 +18,9 @@ selector:
- jstests/core/top.js
- jstests/core/views/views_stats.js
+ # Uses plan cache commands.
+ - jstests/core/restart_catalog.js
+
# TODO SERVER-31249: getLastError should not be affected by no-op retries.
- jstests/core/bulk_legacy_enforce_gle.js
diff --git a/jstests/core/restart_catalog.js b/jstests/core/restart_catalog.js
new file mode 100644
index 00000000000..304f7f6a272
--- /dev/null
+++ b/jstests/core/restart_catalog.js
@@ -0,0 +1,107 @@
+/**
+ * Forces the server to restart the catalog and rebuild its in-memory catalog data structures, then
+ * asserts that the server works normally.
+ * @tags: [assumes_read_concern_unchanged]
+ */
+(function() {
+ "use strict";
+
+ // Helper function for sorting documents in JavaScript.
+ function sortOnId(doc1, doc2) {
+ return bsonWoCompare({_: doc1._id}, {_: doc2._id});
+ }
+
+ const testDB = db.getSiblingDB("restart_catalog");
+ const artistsColl = testDB.getCollection("artists");
+ const songsColl = testDB.getCollection("songs");
+ artistsColl.drop();
+ songsColl.drop();
+
+ // Populate some data into the collection.
+ const artists = [
+ {_id: "beyonce"},
+ {_id: "fenech-soler"},
+ {_id: "gallant"},
+ ];
+ for (let artist of artists) {
+ assert.commandWorked(artistsColl.insert(artist));
+ }
+
+ const songs = [
+ {_id: "flawless", artist: "beyonce", sales: 5000},
+ {_id: "conversation", artist: "fenech-soler", sales: 75.5},
+ {_id: "kaleidoscope", artist: "fenech-soler", sales: 30.0},
+ {_id: "miyazaki", artist: "gallant", sales: 400.3},
+ {_id: "percogesic", artist: "gallant", sales: 550.8},
+ {_id: "shotgun", artist: "gallant", sales: 300.0},
+ ];
+ for (let song of songs) {
+ assert.commandWorked(songsColl.insert(song, {writeConcern: {w: "majority"}}));
+ }
+
+ // Perform some queries.
+ function assertQueriesFindExpectedData() {
+ assert.eq(artistsColl.find().sort({_id: 1}).toArray(), artists);
+ assert.eq(songsColl.find().sort({_id: 1}).toArray(), songs.sort(sortOnId));
+
+ const songsWithLotsOfSales = songs.filter(song => song.sales > 500).sort(sortOnId);
+ assert.eq(songsColl.find({sales: {$gt: 500}}).sort({_id: 1}).toArray(),
+ songsWithLotsOfSales);
+
+ const songsByGallant = songs.filter(song => song.artist === "gallant").sort(sortOnId);
+ assert.eq(songsColl.aggregate([{$match: {artist: "gallant"}}, {$sort: {_id: 1}}]).toArray(),
+ songsByGallant);
+
+ const initialValue = 0;
+ const totalSales = songs.reduce((total, song) => total + song.sales, initialValue);
+ assert.eq(songsColl
+ .aggregate([{$group: {_id: null, totalSales: {$sum: "$sales"}}}],
+ {readConcern: {level: "majority"}})
+ .toArray(),
+ [{_id: null, totalSales: totalSales}]);
+ }
+ assertQueriesFindExpectedData();
+
+ // Remember what indexes are present, then restart the catalog.
+ const songIndexesBeforeRestart = songsColl.getIndexes().sort(sortOnId);
+ const artistIndexesBeforeRestart = artistsColl.getIndexes().sort(sortOnId);
+ assert.commandWorked(db.adminCommand({restartCatalog: 1}));
+
+ // Access the query plan cache. (This makes no assumptions about the state of the plan cache
+ // after restart; however, the database definitely should not crash.)
+ [songsColl, artistsColl].forEach(coll => {
+ assert.commandWorked(coll.runCommand("planCacheListPlans", {query: {_id: 1}}));
+ assert.commandWorked(coll.runCommand("planCacheListQueryShapes"));
+ assert.commandWorked(coll.runCommand("planCacheClear"));
+ });
+
+ // Verify that the data in the collections has not changed.
+ assertQueriesFindExpectedData();
+
+ // Verify that both collections have the same indexes as prior to the restart.
+ const songIndexesAfterRestart = songsColl.getIndexes().sort(sortOnId);
+ assert.eq(songIndexesBeforeRestart, songIndexesAfterRestart);
+ const artistIndexesAfterRestart = artistsColl.getIndexes().sort(sortOnId);
+ assert.eq(artistIndexesBeforeRestart, artistIndexesAfterRestart);
+
+ // Create new indexes and run more queries.
+ assert.commandWorked(songsColl.createIndex({sales: 1}));
+ assert.commandWorked(songsColl.createIndex({artist: 1, sales: 1}));
+ assertQueriesFindExpectedData();
+
+ // Modify an existing collection.
+ assert.commandWorked(artistsColl.runCommand("collMod", {validator: {_id: {$type: "string"}}}));
+ assert.writeErrorWithCode(artistsColl.insert({_id: 7}), ErrorCodes.DocumentValidationFailure);
+
+ // Perform another write, implicitly creating a new collection and database.
+ const secondTestDB = db.getSiblingDB("restart_catalog_2");
+ const foodColl = secondTestDB.getCollection("food");
+ foodColl.drop();
+ const doc = {_id: "apple", category: "fruit"};
+ assert.commandWorked(foodColl.insert(doc));
+ assert.eq(foodColl.find().toArray(), [doc]);
+
+ // Build a new index on the new collection.
+ assert.commandWorked(foodColl.createIndex({category: -1}));
+ assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
+}());
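
For reference, here is a minimal standalone sketch of driving the new command by hand, outside the passthrough fixtures. It is not part of this patch; the database and collection names are illustrative, and it assumes a shell with MongoRunner available:

    (function() {
        "use strict";
        // restartCatalog is registered only when test commands are enabled.
        const conn = MongoRunner.runMongod({setParameter: "enableTestCommands=1"});
        const demoColl = conn.getDB("restart_catalog_demo").getCollection("demo");
        assert.commandWorked(demoColl.insert({_id: 1}));
        assert.commandWorked(conn.adminCommand({restartCatalog: 1}));
        // The catalog is rebuilt from durable metadata, so the document survives.
        assert.eq(1, demoColl.find().itcount());
        MongoRunner.stopMongod(conn);
    }());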
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index dfb57d8a4dc..c2296a17110 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -389,6 +389,7 @@
reapLogicalSessionCacheNow: {skip: isAnInternalCommand},
refreshSessions: {skip: isUnrelated},
refreshSessionsInternal: {skip: isAnInternalCommand},
+ restartCatalog: {skip: isAnInternalCommand},
reIndex: {command: {reIndex: "view"}, expectFailure: true},
removeShard: {skip: isUnrelated},
removeShardFromZone: {skip: isUnrelated},
diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js
index 6c2a547faa8..558814b1847 100644
--- a/jstests/libs/parallelTester.js
+++ b/jstests/libs/parallelTester.js
@@ -195,6 +195,9 @@ if (typeof _threadInject != "undefined") {
// Views tests
"views/invalid_system_views.js", // Creates invalid view definitions in system.views.
"views/views_all_commands.js", // Drops test DB.
+
+ // Destroys and recreates the catalog, which will interfere with other tests.
+ "restart_catalog.js",
]);
// The following tests cannot run when shell readMode is legacy.
diff --git a/jstests/noPassthrough/restart_catalog_sharded_cluster.js b/jstests/noPassthrough/restart_catalog_sharded_cluster.js
new file mode 100644
index 00000000000..8bef9447c04
--- /dev/null
+++ b/jstests/noPassthrough/restart_catalog_sharded_cluster.js
@@ -0,0 +1,203 @@
+/**
+ * Tests restarting the catalog in a sharded cluster on the config server and the shards.
+ */
+(function() {
+ "use strict";
+
+ // Helper function for sorting documents in JavaScript.
+ function sortOn(fieldName) {
+ return (doc1, doc2) => {
+ return bsonWoCompare({_: doc1[fieldName]}, {_: doc2[fieldName]});
+ };
+ }
+
+ const st = new ShardingTest({
+ name: "restart_catalog_sharded_cluster",
+ mongos: 1,
+ config: 1,
+ shards: {
+ rs: true,
+ rs0: {nodes: 1},
+ rs1: {nodes: 1},
+ },
+ other: {
+ enableBalancer: false,
+ configOptions: {setParameter: "enableTestCommands=1"},
+ shardOptions: {setParameter: "enableTestCommands=1"},
+ }
+ });
+ const mongos = st.s0;
+ const shard0 = st.shard0;
+ const shard1 = st.shard1;
+
+ const dbName = "drinks";
+
+ // Create a sharded collection and distribute chunks amongst the shards.
+ const coffees = [
+ {_id: "americano", price: 1.5},
+ {_id: "espresso", price: 2.0},
+ {_id: "starbucks", price: 1000.0}
+ ];
+ const coffeeColl = mongos.getDB(dbName).getCollection("coffee");
+ assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, shard0.shardName);
+ assert.commandWorked(
+ mongos.adminCommand({shardCollection: coffeeColl.getFullName(), key: {price: 1}}));
+ const splitPoint = 50.0;
+ assert.commandWorked(
+ mongos.adminCommand({split: coffeeColl.getFullName(), middle: {price: splitPoint}}));
+ for (let coffee of coffees) {
+ assert.commandWorked(coffeeColl.insert(coffee, {writeConcern: {w: "majority"}}));
+ }
+ assert.commandWorked(mongos.adminCommand({
+ moveChunk: coffeeColl.getFullName(),
+ find: {price: 1000.0},
+ to: shard1.shardName,
+ _waitForDelete: true
+ }));
+ assert.commandWorked(mongos.adminCommand({
+ moveChunk: coffeeColl.getFullName(),
+ find: {price: 0.0},
+ to: shard0.shardName,
+ _waitForDelete: true
+ }));
+
+ // Create an unsharded collection and throw some data in.
+ const teaColl = mongos.getDB(dbName).getCollection("tea");
+ const teas = [
+ {_id: "darjeeling", price: 2.0},
+ {_id: "earl gray", price: 1.5},
+ {_id: "sencha", price: 3.5}
+ ];
+ for (let tea of teas) {
+ assert.commandWorked(teaColl.insert(tea, {writeConcern: {w: "majority"}}));
+ }
+
+ // Run queries on both the sharded and unsharded collection.
+ function assertShardsHaveExpectedData() {
+ const dbShard0 = shard0.getDB(dbName);
+ const dbShard1 = shard1.getDB(dbName);
+
+ // Assert that we can find all documents in the unsharded collection by either asking
+ // mongos, or consulting the primary shard directly.
+ assert.eq(teaColl.find().sort({_id: 1}).readConcern("majority").toArray(),
+ teas.sort(sortOn("_id")),
+ "couldn't find all unsharded data via mongos");
+ assert.eq(dbShard0.tea.find().sort({_id: 1}).toArray(),
+ teas.sort(sortOn("_id")),
+ "couldn't find all unsharded data directly via primary shard");
+ assert.eq(teaColl.find().sort({price: 1}).toArray(), teas.sort(sortOn("price")));
+
+ // Assert that we can find all documents in the sharded collection via scatter-gather.
+ assert.eq(coffeeColl.find().sort({_id: 1}).readConcern("majority").toArray(),
+ coffees.sort(sortOn("_id")),
+ "couldn't find all sharded data via mongos scatter-gather");
+
+ // Assert that we can find all documents via a query that targets multiple shards.
+ assert.eq(coffeeColl.find({price: {$gt: 0}}).sort({price: 1}).toArray(),
+ coffees.sort(sortOn("price")),
+ "couldn't find all sharded data via mongos multi-shard targeted query");
+
+ // Assert that we can find all sharded documents on shard0 by shard targeting via mongos,
+ // and by consulting shard0 directly.
+ const dataShard0 = coffees.filter(drink => drink.price < splitPoint).sort(sortOn("_id"));
+ assert.eq(coffeeColl.find({price: {$lt: splitPoint}}).sort({_id: 1}).toArray(),
+ dataShard0,
+ "couldn't find shard0 data via targeting through mongos");
+ jsTest.log(tojson(dbShard0.getCollectionInfos()));
+ assert.eq(dbShard0.coffee.find().toArray(),
+ dataShard0,
+ "couldn't find shard0 data by directly asking shard0");
+
+ // Assert that we can find all sharded documents on shard1 by shard targeting via mongos,
+ // and by consulting shard1 directly.
+ const dataShard1 = coffees.filter(drink => drink.price >= splitPoint).sort(sortOn("_id"));
+ assert.eq(coffeeColl.find({price: {$gte: splitPoint}}).sort({_id: 1}).toArray(),
+ dataShard1,
+ "couldn't find shard1 data via targeting through mongos");
+ assert.eq(dbShard1.coffee.find().toArray(),
+ dataShard1,
+ "couldn't find shard1 data by directly asking shard1");
+ }
+ assertShardsHaveExpectedData();
+
+ // Run queries on the metadata stored in the config servers.
+ function assertConfigServersHaveExpectedData() {
+ const configDBViaMongos = mongos.getDB("config");
+ const configDBViaConfigSvr = st.config0.getDB("config");
+ const projectOnlyShard = {_id: 0, shard: 1};
+
+ // Assert that we can find documents for chunk metadata, both via mongos and by asking the
+ // config server primary directly.
+ const smallestChunk = {"max.price": splitPoint};
+ const smallestChunkShard = {shard: "restart_catalog_sharded_cluster-rs0"};
+ assert.eq(configDBViaMongos.chunks.find(smallestChunk, projectOnlyShard).toArray(),
+ [smallestChunkShard]);
+ assert.eq(configDBViaConfigSvr.chunks.find(smallestChunk, projectOnlyShard).toArray(),
+ [smallestChunkShard]);
+
+ const largestChunk = {"min.price": splitPoint};
+ const largestChunkShard = {shard: "restart_catalog_sharded_cluster-rs1"};
+ assert.eq(configDBViaMongos.chunks.find(largestChunk, projectOnlyShard).toArray(),
+ [largestChunkShard]);
+ assert.eq(configDBViaConfigSvr.chunks.find(largestChunk, projectOnlyShard).toArray(),
+ [largestChunkShard]);
+ }
+ assertConfigServersHaveExpectedData();
+
+ // Restart the catalog on the config server primary, then assert that both collection data and
+ // sharding metadata are as expected.
+ assert.commandWorked(st.config0.getDB("admin").runCommand({restartCatalog: 1}));
+ assertConfigServersHaveExpectedData();
+ assertShardsHaveExpectedData();
+
+ // Remember what indexes are present, then restart the catalog on all shards via mongos.
+ const teaIndexesBeforeRestart = teaColl.getIndexes().sort(sortOn("_id"));
+ const coffeeIndexesBeforeRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
+ assert.commandWorked(mongos.adminCommand({restartCatalog: 1}));
+
+ // Verify that the data in the collections and the metadata have not changed.
+ assertConfigServersHaveExpectedData();
+ assertShardsHaveExpectedData();
+
+ // Verify that both the sharded and unsharded collection have the same indexes as prior to the
+ // restart.
+ const teaIndexesAfterRestart = teaColl.getIndexes().sort(sortOn("_id"));
+ assert.eq(teaIndexesBeforeRestart, teaIndexesAfterRestart);
+ const coffeeIndexesAfterRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
+ assert.eq(coffeeIndexesBeforeRestart, coffeeIndexesAfterRestart);
+
+ // Create new indexes on both collections and verify that queries return the same results.
+ [teaColl, coffeeColl].forEach(coll => {
+ assert.commandWorked(coll.createIndex({price: -1}));
+ assert.commandWorked(coll.createIndex({price: 1, _id: 1}));
+ });
+ assertShardsHaveExpectedData();
+
+ // Modify the existing collections.
+ const validator = {price: {$gt: 0}};
+ [teaColl, coffeeColl].forEach(coll => {
+ assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
+ assert.writeErrorWithCode(coll.insert({price: -1}), ErrorCodes.DocumentValidationFailure);
+ });
+
+ // Perform another write, implicitly creating a new collection and database.
+ const secondTestDB = mongos.getDB("restart_catalog_sharded_cluster_2");
+ const foodColl = secondTestDB.getCollection("food");
+ const doc = {_id: "apple", category: "fruit"};
+ assert.commandWorked(foodColl.insert(doc));
+ assert.commandWorked(foodColl.createIndex({category: 1}));
+ assert.eq(foodColl.find().toArray(), [doc]);
+
+ // Shard the new collection and verify we can find its data again.
+ assert.commandWorked(mongos.adminCommand({enableSharding: secondTestDB.getName()}));
+ assert.commandWorked(
+ mongos.adminCommand({shardCollection: foodColl.getFullName(), key: {category: 1}}));
+ assert.eq(foodColl.find().toArray(), [doc]);
+
+ // Build a new index on the new collection.
+ assert.commandWorked(foodColl.createIndex({category: -1}));
+ assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
+
+ st.stop();
+}());
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index e25e9ecb7b3..3f3d208fecb 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -312,6 +312,7 @@
replSetUpdatePosition: {skip: "does not return user data"},
replSetResizeOplog: {skip: "does not return user data"},
resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
resync: {skip: "primary only"},
revokePrivilegesFromRole: {skip: "primary only"},
revokeRolesFromRole: {skip: "primary only"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index bdd94b843f6..1374cd7ed25 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -357,6 +357,7 @@
replSetUpdatePosition: {skip: "does not return user data"},
replSetResizeOplog: {skip: "does not return user data"},
resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
resync: {skip: "primary only"},
revokePrivilegesFromRole: {skip: "primary only"},
revokeRolesFromRole: {skip: "primary only"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index ec5eef70c75..2043f08e273 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -318,6 +318,7 @@
replSetUpdatePosition: {skip: "does not return user data"},
replSetResizeOplog: {skip: "does not return user data"},
resetError: {skip: "does not return user data"},
+ restartCatalog: {skip: "internal-only command"},
resync: {skip: "primary only"},
revokePrivilegesFromRole: {skip: "primary only"},
revokeRolesFromRole: {skip: "primary only"},
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index 0a66e9ba264..a6f6b3ce69f 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -221,13 +221,14 @@ env.CppUnitTest(
env.Library(
target='catalog_impl',
source=[
+ "catalog_control.cpp",
"collection_compact.cpp",
"collection_impl.cpp",
"collection_info_cache_impl.cpp",
- "database_impl.cpp",
"database_holder_impl.cpp",
- "index_catalog_impl.cpp",
+ "database_impl.cpp",
"index_catalog_entry_impl.cpp",
+ "index_catalog_impl.cpp",
"index_consistency.cpp",
"index_create_impl.cpp",
"private/record_store_validate_adaptor.cpp",
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
new file mode 100644
index 00000000000..a9ca774c9a9
--- /dev/null
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -0,0 +1,164 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/catalog_control.h"
+
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_catalog_entry.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/catalog/uuid_catalog.h"
+#include "mongo/db/ftdc/ftdc_mongod.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/repair_database.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace catalog {
+void closeCatalog(OperationContext* opCtx) {
+ invariant(opCtx->lockState()->isW());
+
+ // Close all databases.
+ log() << "closeCatalog: closing all databases in dbholder";
+ BSONObjBuilder closeDbsBuilder;
+ constexpr auto force = true;
+ constexpr auto reason = "closing databases for closeCatalog";
+ uassert(40687,
+ str::stream() << "failed to close all databases; result of operation: "
+ << closeDbsBuilder.obj().jsonString(),
+ dbHolder().closeAll(opCtx, closeDbsBuilder, force, reason));
+
+ // Because we've force-closed the database, there should be no databases left open.
+ auto closeDbsResult = closeDbsBuilder.obj();
+ invariant(
+ !closeDbsResult.hasField("nNotClosed"),
+ str::stream() << "expected no databases open after a force close; result of operation: "
+ << closeDbsResult.jsonString());
+
+ // Close the storage engine's catalog.
+ log() << "closeCatalog: closing storage engine catalog";
+ opCtx->getServiceContext()->getGlobalStorageEngine()->closeCatalog(opCtx);
+}
+
+void openCatalog(OperationContext* opCtx) {
+ invariant(opCtx->lockState()->isW());
+
+ // Load the catalog in the storage engine.
+ log() << "openCatalog: loading storage engine catalog";
+ auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+ storageEngine->loadCatalog(opCtx);
+
+ log() << "openCatalog: reconciling catalog and idents";
+ auto indexesToRebuild = storageEngine->reconcileCatalogAndIdents(opCtx);
+ fassertStatusOK(40688, indexesToRebuild.getStatus());
+
+ // Rebuild indexes if necessary.
+ for (auto indexNamespace : indexesToRebuild.getValue()) {
+ NamespaceString collNss(indexNamespace.first);
+ auto indexName = indexNamespace.second;
+
+ auto dbCatalogEntry = storageEngine->getDatabaseCatalogEntry(opCtx, collNss.db());
+ invariant(dbCatalogEntry,
+ str::stream() << "couldn't get database catalog entry for database "
+ << collNss.db());
+ auto collCatalogEntry = dbCatalogEntry->getCollectionCatalogEntry(collNss.toString());
+ invariant(collCatalogEntry,
+ str::stream() << "couldn't get collection catalog entry for collection "
+ << collNss.toString());
+
+ auto indexSpecs = getIndexNameObjs(
+ opCtx, dbCatalogEntry, collCatalogEntry, [&indexName](const std::string& name) {
+ return name == indexName;
+ });
+ if (!indexSpecs.isOK() || indexSpecs.getValue().first.empty()) {
+ fassertStatusOK(40689,
+ {ErrorCodes::InternalError,
+ str::stream() << "failed to get index spec for index " << indexName
+ << " in collection "
+ << collNss.toString()});
+ }
+ auto indexesToRebuild = indexSpecs.getValue();
+ invariant(
+ indexesToRebuild.first.size() == 1,
+ str::stream() << "expected to find a list containing exactly 1 index name, but found "
+ << indexesToRebuild.first.size());
+ invariant(
+ indexesToRebuild.second.size() == 1,
+ str::stream() << "expected to find a list containing exactly 1 index spec, but found "
+ << indexesToRebuild.second.size());
+
+ log() << "openCatalog: rebuilding index " << indexName << " in collection "
+ << collNss.toString();
+ fassertStatusOK(40690,
+ rebuildIndexesOnCollection(
+ opCtx, dbCatalogEntry, collCatalogEntry, std::move(indexesToRebuild)));
+ }
+
+ // Open all databases and repopulate the UUID catalog.
+ log() << "openCatalog: reopening all databases";
+ auto& uuidCatalog = UUIDCatalog::get(opCtx);
+ std::vector<std::string> databasesToOpen;
+ storageEngine->listDatabases(&databasesToOpen);
+ for (auto&& dbName : databasesToOpen) {
+ LOG(1) << "openCatalog: dbholder reopening database " << dbName;
+ auto db = dbHolder().openDb(opCtx, dbName);
+ invariant(db, str::stream() << "failed to reopen database " << dbName);
+
+ std::list<std::string> collections;
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collections);
+ for (auto&& collName : collections) {
+ // Note that the collection name already includes the database component.
+ NamespaceString collNss(collName);
+ auto collection = db->getCollection(opCtx, collName);
+ invariant(collection,
+ str::stream() << "failed to get valid collection pointer for namespace "
+ << collName);
+
+ auto uuid = collection->uuid();
+ // TODO (SERVER-32597): When the minimum featureCompatibilityVersion becomes 3.6, we
+ // can change this condition to be an invariant.
+ if (uuid) {
+ LOG(1) << "openCatalog: registering uuid " << uuid->toString() << " for collection "
+ << collName;
+ uuidCatalog.registerUUIDCatalogEntry(*uuid, collection);
+ }
+
+ // If this is the oplog collection, re-establish the replication system's cached pointer
+ // to the oplog.
+ if (collNss.isOplog()) {
+ log() << "openCatalog: updating cached oplog pointer";
+ repl::establishOplogCollectionForLogging(opCtx, collection);
+ }
+ }
+ }
+}
+} // namespace catalog
+} // namespace mongo
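
Because openCatalog() re-registers collection UUIDs and re-establishes the cached oplog pointer, writes issued after a restart must still be logged for replication. A sketch of checking that observable behavior from the shell (an illustration under stated assumptions, not part of this patch):

    (function() {
        "use strict";
        const rst = new ReplSetTest(
            {nodes: 1, nodeOptions: {setParameter: "enableTestCommands=1"}});
        rst.startSet();
        rst.initiate();
        const primary = rst.getPrimary();
        const coll = primary.getDB("restart_catalog_oplog_demo").getCollection("demo");
        assert.commandWorked(coll.insert({_id: "before"}));
        assert.commandWorked(primary.adminCommand({restartCatalog: 1}));
        assert.commandWorked(coll.insert({_id: "after"}));
        // If establishOplogCollectionForLogging() re-established the cached
        // pointer, the post-restart insert reached the oplog.
        const oplog = primary.getDB("local").getCollection("oplog.rs");
        assert.eq(1, oplog.find({op: "i", "o._id": "after"}).itcount());
        rst.stopSet();
    }());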
diff --git a/src/mongo/db/catalog/catalog_control.h b/src/mongo/db/catalog/catalog_control.h
new file mode 100644
index 00000000000..85eff55ec25
--- /dev/null
+++ b/src/mongo/db/catalog/catalog_control.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/db/operation_context.h"
+
+namespace mongo {
+namespace catalog {
+/**
+ * Closes the catalog, destroying all associated in-memory data structures for all databases. After
+ * a call to this function, it is illegal to access the catalog before calling openCatalog().
+ *
+ * Must be called with the global lock acquired in exclusive mode.
+ */
+void closeCatalog(OperationContext* opCtx);
+
+/**
+ * Restores the catalog and all in-memory state after a call to closeCatalog().
+ *
+ * Must be called with the global lock acquired in exclusive mode.
+ */
+void openCatalog(OperationContext* opCtx);
+} // namespace catalog
+} // namespace mongo
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index af081c9d594..62f424e8b2a 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/repl/oplog.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/util/log.h"
@@ -183,6 +184,15 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, StringData ns, boo
return it->second;
}
+namespace {
+void evictDatabaseFromUUIDCatalog(OperationContext* opCtx, Database* db) {
+ UUIDCatalog::get(opCtx).onCloseDatabase(db);
+ for (auto&& coll : *db) {
+ NamespaceUUIDCache::get(opCtx).evictNamespace(coll->ns());
+ }
+}
+} // namespace
+
void DatabaseHolderImpl::close(OperationContext* opCtx, StringData ns, const std::string& reason) {
invariant(opCtx->lockState()->isW());
@@ -196,10 +206,8 @@ void DatabaseHolderImpl::close(OperationContext* opCtx, StringData ns, const std
}
auto db = it->second;
- UUIDCatalog::get(opCtx).onCloseDatabase(db);
- for (auto&& coll : *db) {
- NamespaceUUIDCache::get(opCtx).evictNamespace(coll->ns());
- }
+ repl::oplogCheckCloseDatabase(opCtx, db);
+ evictDatabaseFromUUIDCatalog(opCtx, db);
db->close(opCtx, reason);
delete db;
@@ -241,6 +249,8 @@ bool DatabaseHolderImpl::closeAll(OperationContext* opCtx,
}
Database* db = _dbs[name];
+ repl::oplogCheckCloseDatabase(opCtx, db);
+ evictDatabaseFromUUIDCatalog(opCtx, db);
db->close(opCtx, reason);
delete db;
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index 2bf312a71a5..36107dce430 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -191,6 +191,7 @@ env.Library(
"rename_collection_cmd.cpp",
"repair_cursor.cpp",
"resize_oplog.cpp",
+ "restart_catalog_command.cpp",
"run_aggregate.cpp",
"set_feature_compatibility_version_command.cpp",
"snapshot_management.cpp",
@@ -207,6 +208,7 @@ env.Library(
'$BUILD_DIR/mongo/db/auth/authmongod',
'$BUILD_DIR/mongo/db/background',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
+ '$BUILD_DIR/mongo/db/catalog/catalog_impl',
'$BUILD_DIR/mongo/db/catalog/collection',
'$BUILD_DIR/mongo/db/catalog/index_key_validate',
'$BUILD_DIR/mongo/db/cloner',
diff --git a/src/mongo/db/commands/restart_catalog_command.cpp b/src/mongo/db/commands/restart_catalog_command.cpp
new file mode 100644
index 00000000000..6fb51d4fe56
--- /dev/null
+++ b/src/mongo/db/commands/restart_catalog_command.cpp
@@ -0,0 +1,106 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include <string>
+#include <vector>
+
+#include "mongo/db/catalog/catalog_control.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+/**
+ * This testing-only command causes the server to close and reopen the catalog, rebuilding all
+ * in-memory data structures.
+ */
+class RestartCatalogCmd final : public BasicCommand {
+public:
+ RestartCatalogCmd() : BasicCommand("restartCatalog") {}
+
+ Status checkAuthForOperation(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj) final {
+ // No auth checks as this is a testing-only command.
+ return Status::OK();
+ }
+
+ bool adminOnly() const final {
+ return true;
+ }
+
+ bool maintenanceMode() const final {
+ return true;
+ }
+
+ bool maintenanceOk() const final {
+ return false;
+ }
+
+ bool slaveOk() const final {
+ return true;
+ }
+
+ bool supportsWriteConcern(const BSONObj& cmd) const final {
+ return false;
+ }
+
+ std::string help() const final {
+ return "restart catalog\n"
+ "Internal command for testing only. Closes and restores the catalog, rebuilding\n"
+ "in-memory data structures as needed.\n";
+ }
+
+ bool run(OperationContext* opCtx,
+ const std::string& db,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) final {
+ Lock::GlobalLock global(opCtx, MODE_X, UINT_MAX);
+
+ log() << "Closing database catalog";
+ catalog::closeCatalog(opCtx);
+
+ log() << "Reopening database catalog";
+ catalog::openCatalog(opCtx);
+
+ return true;
+ }
+};
+
+MONGO_INITIALIZER(RegisterRestartCatalogCommand)(InitializerContext* ctx) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new RestartCatalogCmd();
+ }
+ return Status::OK();
+}
+} // namespace mongo
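
Registration above is gated on Command::testCommandsEnabled, so a mongod started without enableTestCommands should not expose the command at all. A sketch of verifying that, assuming an unregistered command fails with CommandNotFound:

    (function() {
        "use strict";
        TestData.enableTestCommands = false;  // Keep MongoRunner from enabling them.
        const conn = MongoRunner.runMongod({});
        assert.commandFailedWithCode(conn.adminCommand({restartCatalog: 1}),
                                     ErrorCodes.CommandNotFound);
        MongoRunner.stopMongod(conn);
    }());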
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 124e9c8ed78..16437a0552e 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1687,6 +1687,12 @@ void acquireOplogCollectionForLogging(OperationContext* opCtx) {
}
}
+void establishOplogCollectionForLogging(OperationContext* opCtx, Collection* oplog) {
+ invariant(opCtx->lockState()->isW());
+ invariant(oplog);
+ _localOplogCollection = oplog;
+}
+
void signalOplogWaiters() {
if (_localOplogCollection) {
_localOplogCollection->notifyCappedWaitersIfNeeded();
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index 6a2de1c6bc4..f6ea00d0067 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -155,6 +155,14 @@ void oplogCheckCloseDatabase(OperationContext* opCtx, Database* db);
*/
void acquireOplogCollectionForLogging(OperationContext* opCtx);
+/**
+ * Use 'oplog' as the new cached pointer to the local oplog.
+ *
+ * Called by catalog::openCatalog() to re-establish the oplog collection pointer while holding onto
+ * the global lock in exclusive mode.
+ */
+void establishOplogCollectionForLogging(OperationContext* opCtx, Collection* oplog);
+
using IncrementOpsAppliedStatsFn = stdx::function<void()>;
/**
* Take the object field of a BSONObj, the BSONObj, and the namespace of
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index e7f8ba7d377..436e4a47873 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -83,20 +83,23 @@ KVStorageEngine::KVStorageEngine(
!(options.directoryPerDB && !engine->supportsDirectoryPerDB()));
OperationContextNoop opCtx(_engine->newRecoveryUnit());
+ loadCatalog(&opCtx);
+}

- bool catalogExists = engine->hasIdent(&opCtx, catalogInfo);
-
- if (options.forRepair && catalogExists) {
+void KVStorageEngine::loadCatalog(OperationContext* opCtx) {
+ bool catalogExists = _engine->hasIdent(opCtx, catalogInfo);
+ if (_options.forRepair && catalogExists) {
log() << "Repairing catalog metadata";
// TODO should also validate all BSON in the catalog.
- engine->repairIdent(&opCtx, catalogInfo).transitional_ignore();
+ _engine->repairIdent(opCtx, catalogInfo).transitional_ignore();
}
if (!catalogExists) {
- WriteUnitOfWork uow(&opCtx);
- Status status = _engine->createGroupedRecordStore(
- &opCtx, catalogInfo, catalogInfo, CollectionOptions(), KVPrefix::kNotPrefixed);
+ WriteUnitOfWork uow(opCtx);
+
+ auto status = _engine->createGroupedRecordStore(
+ opCtx, catalogInfo, catalogInfo, CollectionOptions(), KVPrefix::kNotPrefixed);
// BadValue is usually caused by invalid configuration string.
// We still fassert() but without a stack trace.
if (status.code() == ErrorCodes::BadValue) {
@@ -107,10 +110,10 @@ KVStorageEngine::KVStorageEngine(
}
_catalogRecordStore = _engine->getGroupedRecordStore(
- &opCtx, catalogInfo, catalogInfo, CollectionOptions(), KVPrefix::kNotPrefixed);
+ opCtx, catalogInfo, catalogInfo, CollectionOptions(), KVPrefix::kNotPrefixed);
_catalog.reset(new KVCatalog(
_catalogRecordStore.get(), _options.directoryPerDB, _options.directoryForIndexes));
- _catalog->init(&opCtx);
+ _catalog->init(opCtx);
std::vector<std::string> collections;
_catalog->getAllCollections(&collections);
@@ -127,13 +130,25 @@ KVStorageEngine::KVStorageEngine(
db = _databaseCatalogEntryFactory(dbName, this).release();
}
- db->initCollection(&opCtx, coll, options.forRepair);
- auto maxPrefixForCollection = _catalog->getMetaData(&opCtx, coll).getMaxPrefix();
+ db->initCollection(opCtx, coll, _options.forRepair);
+ auto maxPrefixForCollection = _catalog->getMetaData(opCtx, coll).getMaxPrefix();
maxSeenPrefix = std::max(maxSeenPrefix, maxPrefixForCollection);
}
KVPrefix::setLargestPrefix(maxSeenPrefix);
- opCtx.recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
+}
+
+void KVStorageEngine::closeCatalog(OperationContext* opCtx) {
+ dassert(opCtx->lockState()->isLocked());
+ stdx::lock_guard<stdx::mutex> lock(_dbsLock);
+ for (auto entry : _dbs) {
+ delete entry.second;
+ }
+ _dbs.clear();
+
+ _catalog.reset(nullptr);
+ _catalogRecordStore.reset(nullptr);
}
/**
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index c7d98cb7aad..4e08822cc73 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -153,6 +153,10 @@ public:
StatusWith<std::vector<StorageEngine::CollectionIndexNamePair>> reconcileCatalogAndIdents(
OperationContext* opCtx) override;
+ void loadCatalog(OperationContext* opCtx) final;
+
+ void closeCatalog(OperationContext* opCtx) final;
+
private:
using CollIter = std::list<std::string>::iterator;
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 221f009edee..cd881198f03 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -203,6 +203,15 @@ public:
}
/**
+ * Populates and tears down in-memory data structures, respectively. Only required for storage
+ * engines that support recoverToStableTimestamp().
+ *
+ * Must be called with the global lock acquired in exclusive mode.
+ */
+ virtual void loadCatalog(OperationContext* opCtx) {}
+ virtual void closeCatalog(OperationContext* opCtx) {}
+
+ /**
* Closes all file handles associated with a database.
*/
virtual Status closeDatabase(OperationContext* opCtx, StringData db) = 0;
diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript
index 23fe7a7a7d6..3cfa6aa678b 100644
--- a/src/mongo/s/commands/SConscript
+++ b/src/mongo/s/commands/SConscript
@@ -69,6 +69,7 @@ env.Library(
'cluster_remove_shard_from_zone_cmd.cpp',
'cluster_repl_set_get_status_cmd.cpp',
'cluster_reset_error_cmd.cpp',
+ 'cluster_restart_catalog_command.cpp',
'cluster_set_feature_compatibility_version_cmd.cpp',
'cluster_shard_collection_cmd.cpp',
'cluster_shutdown_cmd.cpp',
diff --git a/src/mongo/s/commands/cluster_restart_catalog_command.cpp b/src/mongo/s/commands/cluster_restart_catalog_command.cpp
new file mode 100644
index 00000000000..4caf7b3b127
--- /dev/null
+++ b/src/mongo/s/commands/cluster_restart_catalog_command.cpp
@@ -0,0 +1,106 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/commands.h"
+#include "mongo/s/commands/cluster_commands_helpers.h"
+
+namespace mongo {
+/**
+ * Forwards the testing-only restartCatalog command to all shards.
+ */
+class ClusterRestartCatalogCmd final : public BasicCommand {
+public:
+ ClusterRestartCatalogCmd() : BasicCommand("restartCatalog") {}
+
+ Status checkAuthForOperation(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj) final {
+ // No auth checks as this is a testing-only command.
+ return Status::OK();
+ }
+
+ bool adminOnly() const final {
+ return true;
+ }
+
+ bool maintenanceMode() const final {
+ return true;
+ }
+
+ bool maintenanceOk() const final {
+ return false;
+ }
+
+ bool slaveOk() const final {
+ return true;
+ }
+
+ bool supportsWriteConcern(const BSONObj& cmd) const final {
+ return false;
+ }
+
+ std::string help() const final {
+ return "restart catalog\n"
+ "Internal command for testing only. Forwards the restartCatalog command to\n"
+ "all shards.\n";
+ }
+
+ bool run(OperationContext* opCtx,
+ const std::string& db,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) final {
+ // This command doesn't operate on a collection namespace, so just pass in an empty
+ // NamespaceString.
+ const auto namespaceStringForCommand = boost::none;
+ auto shardResponses = scatterGatherUnversionedTargetAllShards(
+ opCtx,
+ db,
+ namespaceStringForCommand,
+ CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
+ ReadPreferenceSetting::get(opCtx),
+ Shard::RetryPolicy::kIdempotent);
+
+ std::string errmsg;
+ appendRawResponses(opCtx, &errmsg, &result, shardResponses);
+
+ // Intentionally not adding the error message to 'result', as it will already contain all
+ // the errors from the shards in a field named 'raw'.
+ return errmsg.length() > 0 ? false : true;
+ }
+};
+
+MONGO_INITIALIZER(RegisterClusterRestartCatalogCommand)(InitializerContext* ctx) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new ClusterRestartCatalogCmd();
+ }
+ return Status::OK();
+}
+} // namespace mongo
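
Since appendRawResponses() reports each shard's reply under a "raw" field, callers can inspect per-shard results after the fan-out. A sketch against a ShardingTest fixture like the one in the noPassthrough test above (the field shapes in the comments are illustrative):

    // Assumes a running ShardingTest `st` with test commands enabled on the shards.
    const res = st.s0.adminCommand({restartCatalog: 1});
    assert.commandWorked(res);
    // res.raw holds one reply per shard, keyed by the shard's host string, e.g.
    // res.raw["restart_catalog_sharded_cluster-rs0/host:port"] == {ok: 1, ...}.
    for (let shardHost of Object.keys(res.raw)) {
        assert.eq(1, res.raw[shardHost].ok);
    }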