24 files changed, 2 insertions, 801 deletions
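This commit removes the testing-only restartCatalog command from both mongod and mongos, together with every test and suite exclusion that referenced it. For orientation, the deleted tests below invoked the command as follows (a minimal sketch; it ran only against a server started with enableTestCommands=1, since the command was registered through MONGO_REGISTER_TEST_COMMAND):

// Close and reopen the catalog, rebuilding all in-memory catalog data structures.
// Admin-only; on mongos the command fanned out to every shard (see
// cluster_restart_catalog_command.cpp at the end of this diff).
assert.commandWorked(db.adminCommand({restartCatalog: 1}));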
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_kill_primary_jscore_passthrough.yml index 930cefb51f8..c87c59009e6 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_kill_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_kill_primary_jscore_passthrough.yml @@ -21,9 +21,6 @@ selector: - jstests/core/top.js - jstests/core/views/views_stats.js - # Uses plan cache commands. - - jstests/core/restart_catalog.js - # TODO SERVER-31249: getLastError should not be affected by no-op retries. - jstests/core/bulk_legacy_enforce_gle.js diff --git a/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml index d41f43bc0a1..870e55db89e 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml @@ -21,9 +21,6 @@ selector: - jstests/core/top.js - jstests/core/views/views_stats.js - # Uses plan cache commands. - - jstests/core/restart_catalog.js - # TODO SERVER-31249: getLastError should not be affected by no-op retries. - jstests/core/bulk_legacy_enforce_gle.js diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml index 82b57754c37..0db6dd53eb7 100644 --- a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml @@ -21,9 +21,6 @@ selector: - jstests/core/top.js - jstests/core/views/views_stats.js - # Uses plan cache commands. - - jstests/core/restart_catalog.js - # TODO SERVER-31249: getLastError should not be affected by no-op retries. - jstests/core/bulk_legacy_enforce_gle.js diff --git a/jstests/core/restart_catalog.js b/jstests/core/restart_catalog.js deleted file mode 100644 index 9062c8132da..00000000000 --- a/jstests/core/restart_catalog.js +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Forces the server to restart the catalog and rebuild its in-memory catalog data structures, then - * asserts that the server works normally. - * @tags: [ - * assumes_read_concern_unchanged, requires_majority_read_concern, - * - * # restartCatalog command is not available on embedded - * incompatible_with_embedded, - * - * # This test assumes that reads happen on the same node as the 'restartCatalog' command. - * assumes_read_preference_unchanged - * ] - */ -(function() { -"use strict"; - -// Only run this test if the storage engine is "wiredTiger" or "inMemory". -const acceptedStorageEngines = ["wiredTiger", "inMemory"]; -const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger"; -if (!acceptedStorageEngines.includes(currentStorageEngine)) { - jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine + - " storage engine"); - return; -} - -// Helper function for sorting documents in JavaScript. -function sortOnId(doc1, doc2) { - return bsonWoCompare({_: doc1._id}, {_: doc2._id}); -} - -const testDB = db.getSiblingDB("restart_catalog"); -const artistsColl = testDB.getCollection("artists"); -const songsColl = testDB.getCollection("songs"); -artistsColl.drop(); -songsColl.drop(); - -// Populate some data into the collection. 
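// The song inserts below use {writeConcern: {w: "majority"}} so that the
// majority-read-concern aggregation in assertQueriesFindExpectedData() is
// guaranteed to observe them.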
-const artists = [ - {_id: "beyonce"}, - {_id: "fenech-soler"}, - {_id: "gallant"}, -]; -for (let artist of artists) { - assert.commandWorked(artistsColl.insert(artist)); -} - -const songs = [ - {_id: "flawless", artist: "beyonce", sales: 5000}, - {_id: "conversation", artist: "fenech-soler", sales: 75.5}, - {_id: "kaleidoscope", artist: "fenech-soler", sales: 30.0}, - {_id: "miyazaki", artist: "gallant", sales: 400.3}, - {_id: "percogesic", artist: "gallant", sales: 550.8}, - {_id: "shotgun", artist: "gallant", sales: 300.0}, -]; -for (let song of songs) { - assert.commandWorked(songsColl.insert(song, {writeConcern: {w: "majority"}})); -} - -// Perform some queries. -function assertQueriesFindExpectedData() { - assert.eq(artistsColl.find().sort({_id: 1}).toArray(), artists); - assert.eq(songsColl.find().sort({_id: 1}).toArray(), songs.sort(sortOnId)); - - const songsWithLotsOfSales = songs.filter(song => song.sales > 500).sort(sortOnId); - assert.eq(songsColl.find({sales: {$gt: 500}}).sort({_id: 1}).toArray(), songsWithLotsOfSales); - - const songsByGallant = songs.filter(song => song.artist === "gallant").sort(sortOnId); - assert.eq(songsColl.aggregate([{$match: {artist: "gallant"}}, {$sort: {_id: 1}}]).toArray(), - songsByGallant); - - const initialValue = 0; - const totalSales = songs.reduce((total, song) => total + song.sales, initialValue); - assert.eq(songsColl - .aggregate([{$group: {_id: null, totalSales: {$sum: "$sales"}}}], - {readConcern: {level: "majority"}}) - .toArray(), - [{_id: null, totalSales: totalSales}]); -} -assertQueriesFindExpectedData(); - -// Remember what indexes are present, then restart the catalog. -const songIndexesBeforeRestart = songsColl.getIndexes().sort(sortOnId); -const artistIndexesBeforeRestart = artistsColl.getIndexes().sort(sortOnId); -assert.commandWorked(db.adminCommand({restartCatalog: 1})); - -// Access the query plan cache. (This makes no assumptions about the state of the plan cache -// after restart; however, the database definitely should not crash.) -[songsColl, artistsColl].forEach(coll => { - assert.commandWorked(coll.runCommand("planCacheClear")); -}); - -// Verify that the data in the collections has not changed. -assertQueriesFindExpectedData(); - -// Verify that both collections have the same indexes as prior to the restart. -const songIndexesAfterRestart = songsColl.getIndexes().sort(sortOnId); -assert.eq(songIndexesBeforeRestart, songIndexesAfterRestart); -const artistIndexesAfterRestart = artistsColl.getIndexes().sort(sortOnId); -assert.eq(artistIndexesBeforeRestart, artistIndexesAfterRestart); - -// Create new indexes and run more queries. -assert.commandWorked(songsColl.createIndex({sales: 1})); -assert.commandWorked(songsColl.createIndex({artist: 1, sales: 1})); -assertQueriesFindExpectedData(); - -// Modify an existing collection. -assert.commandWorked(artistsColl.runCommand("collMod", {validator: {_id: {$type: "string"}}})); -assert.writeErrorWithCode(artistsColl.insert({_id: 7}), ErrorCodes.DocumentValidationFailure); - -// Perform another write, implicitly creating a new collection and database. -const secondTestDB = db.getSiblingDB("restart_catalog_2"); -const foodColl = secondTestDB.getCollection("food"); -foodColl.drop(); -const doc = { - _id: "apple", - category: "fruit" -}; -assert.commandWorked(foodColl.insert(doc)); -assert.eq(foodColl.find().toArray(), [doc]); - -// Build a new index on the new collection. 
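// The hint({category: -1}) below forces the query through the newly built index,
// confirming that both index builds and index scans work against the rebuilt catalog.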
-assert.commandWorked(foodColl.createIndex({category: -1})); -assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]); - -// The restartCatalog command kills all cursors. Test that a getMore on a cursor that existed -// during restartCatalog fails with the appropriate error code. We insert a second document so -// that we can make a query happen in two batches. -assert.commandWorked(foodColl.insert({_id: "orange"})); -let cursorResponse = assert.commandWorked( - secondTestDB.runCommand({find: foodColl.getName(), filter: {}, batchSize: 1})); -assert.eq(cursorResponse.cursor.firstBatch.length, 1); -assert.neq(cursorResponse.cursor.id, 0); -assert.commandWorked(secondTestDB.adminCommand({restartCatalog: 1})); -assert.commandFailedWithCode( - secondTestDB.runCommand({getMore: cursorResponse.cursor.id, collection: foodColl.getName()}), - ErrorCodes.QueryPlanKilled); -}()); diff --git a/jstests/core/views/view_with_invalid_dbname.js b/jstests/core/views/view_with_invalid_dbname.js index 76812670b98..0eec4595b33 100644 --- a/jstests/core/views/view_with_invalid_dbname.js +++ b/jstests/core/views/view_with_invalid_dbname.js @@ -2,8 +2,7 @@ // invalid name. This test is specifically for the case of a view with a dbname that contains an // embedded null character (SERVER-36859). // -// The 'restartCatalog' command is not available on embedded. -// @tags: [ incompatible_with_embedded, SERVER-38379 ] +// @tags: [ SERVER-38379 ] (function() { "use strict"; @@ -21,13 +20,6 @@ const viewDef = { try { assert.commandWorked(db.system.views.insert(viewDef)); - - // If the reinitialization of the durable view catalog tries to create a NamespaceString using - // the 'viewName' field, it will throw an exception in a place that is not exception safe, - // resulting in an invariant failure. This previously occurred because validation was only - // checking the collection part of the namespace, not the dbname part. With correct validation - // in place, reinitialization succeeds despite the invalid name. - assert.commandWorked(db.adminCommand({restartCatalog: 1})); } finally { // Don't let the bogus view stick around, or else it will cause an error in validation. var result = db.system.views.deleteOne({_id: viewName}); diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js index 2c09075cfc7..6aa34c019ef 100644 --- a/jstests/core/views/views_all_commands.js +++ b/jstests/core/views/views_all_commands.js @@ -432,7 +432,6 @@ let viewsCommandTests = { refreshLogicalSessionCacheNow: {skip: isAnInternalCommand}, reapLogicalSessionCacheNow: {skip: isAnInternalCommand}, refreshSessions: {skip: isUnrelated}, - restartCatalog: {skip: isAnInternalCommand}, reIndex: {command: {reIndex: "view"}, expectFailure: true}, removeShard: {skip: isUnrelated}, removeShardFromZone: {skip: isUnrelated}, diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js index 22b9277042d..b6c503b4823 100644 --- a/jstests/libs/parallelTester.js +++ b/jstests/libs/parallelTester.js @@ -205,9 +205,6 @@ if (typeof _threadInject != "undefined") { "views/views_all_commands.js", // Drops test DB. "views/view_with_invalid_dbname.js", // Puts invalid view definitions in system.views. - // Destroys and recreates the catalog, which will interfere with other tests. - "restart_catalog.js", - // This test works close to the BSON document limit for entries in the durable catalog, // so running it in parallel with other tests will cause failures. 
"long_collection_names.js", diff --git a/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js b/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js deleted file mode 100644 index 420c3446234..00000000000 --- a/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js +++ /dev/null @@ -1,52 +0,0 @@ -/** - * SERVER-35671: Ensure that if database has background operations it can't be closed and that - * attempting to close it won't leave it in an inconsistant state. - * - * @tags: [requires_replication, uses_transactions] - */ - -(function() { -"use strict"; - -load("jstests/noPassthrough/libs/index_build.js"); - -let replSet = new ReplSetTest({name: "server35671", nodes: 1}); -let setFailpointBool = (testDB, failpointName, alwaysOn, times) => { - if (times) { - return testDB.adminCommand({configureFailPoint: failpointName, mode: {"times": times}}); - } else if (alwaysOn) { - return testDB.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn"}); - } else { - return testDB.adminCommand({configureFailPoint: failpointName, mode: "off"}); - } -}; -replSet.startSet(); -replSet.initiate(); -let testDB = replSet.getPrimary().getDB("test"); -// This test depends on using the IndexBuildsCoordinator to build this index, which as of -// SERVER-44405, will not occur in this test unless the collection is created beforehand. -assert.commandWorked(testDB.runCommand({create: "coll"})); - -// Insert document into collection to avoid optimization for index creation on an empty collection. -// This allows us to pause index builds on the collection using a fail point. -assert.commandWorked(testDB.getCollection("coll").insert({a: 1})); - -setFailpointBool(testDB, "hangAfterStartingIndexBuildUnlocked", true); - -// Blocks because of failpoint -let join = startParallelShell( - "db.getSiblingDB('test').coll.createIndex({a: 1, b: 1}, {background: true})", replSet.ports[0]); - -// Let the createIndex start to run. -IndexBuildTest.waitForIndexBuildToScanCollection(testDB, "coll", "a_1_b_1"); - -// Repeated calls should continue to fail without crashing. -assert.commandFailed(testDB.adminCommand({restartCatalog: 1})); -assert.commandFailed(testDB.adminCommand({restartCatalog: 1})); -assert.commandFailed(testDB.adminCommand({restartCatalog: 1})); - -// Unset failpoint so we can join the parallel shell. -setFailpointBool(testDB, "hangAfterStartingIndexBuildUnlocked", false); -join(); -replSet.stopSet(); -})(); diff --git a/jstests/noPassthrough/restart_catalog_preserves_min_visible.js b/jstests/noPassthrough/restart_catalog_preserves_min_visible.js deleted file mode 100644 index 18127ce27d1..00000000000 --- a/jstests/noPassthrough/restart_catalog_preserves_min_visible.js +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Restarting the catalog will destroy and recreate all database/collection/index objects from the - * storage engine state. However, some fields like the `minimumVisibleSnapshot` timestamp are not - * persisted to storage. This value is typically rehydrated when performing replication - * recovery. However there are cases where reads can be at a timestamp prior to where replication - * recovery begins. Those are fixed by copying the previous value over from the destroyed catalog - * object to the recreated one. - * - * This test verifies the collection's minimum visible snapshot timestamp is appropriately copied - * over. 
- * - * @tags: [requires_replication] - */ -(function() { -"use strict"; - -let replSet = new ReplSetTest({name: "server35317", nodes: 1}); -replSet.startSet(); -replSet.initiate(); - -let prim = replSet.getPrimary(); -let beforeIndexBuild = assert.commandWorked(prim.adminCommand({ - configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely", - mode: "alwaysOn" -}))["operationTime"]; -assert.commandWorked(prim.getDB("test").coll.insert({c: 1})); -assert.commandWorked(prim.getDB("test").coll.createIndex({c: 1})); -assert.commandWorked(prim.adminCommand({restartCatalog: 1})); - -let session = prim.startSession({causalConsistency: false}); -let sessionDb = session.getDatabase("test"); -// Prior to fixing SERVER-35317, this would crash a debug build, or return success on a -// non-debug build. Now it should return an error. Specifically, this fails because we're -// trying to read behind the minimum visible snapshot timestamp for the `test.coll` -// collection. -assert.commandFailed(sessionDb.runCommand({ - find: "coll", - filter: {c: 1}, - readConcern: {level: "snapshot", atClusterTime: beforeIndexBuild}, - txnNumber: NumberLong(0) -})); - -session.endSession(); -replSet.stopSet(); -})(); diff --git a/jstests/noPassthrough/restart_catalog_sharded_cluster.js b/jstests/noPassthrough/restart_catalog_sharded_cluster.js deleted file mode 100644 index 782fa9aa913..00000000000 --- a/jstests/noPassthrough/restart_catalog_sharded_cluster.js +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Tests restarting the catalog in a sharded cluster on the config server and the shards. - * @tags: [requires_replication, requires_sharding, requires_majority_read_concern] - */ -(function() { -"use strict"; - -// Only run this test if the storage engine is "wiredTiger" or "inMemory". -const acceptedStorageEngines = ["wiredTiger", "inMemory"]; -const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger"; -if (!acceptedStorageEngines.includes(currentStorageEngine)) { - jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine + - " storage engine"); - return; -} - -// Helper function for sorting documents in JavaScript. -function sortOn(fieldName) { - return (doc1, doc2) => { - return bsonWoCompare({_: doc1[fieldName]}, {_: doc2[fieldName]}); - }; -} - -const st = new ShardingTest({ - name: "restart_catalog_sharded_cluster", - mongos: 1, - config: 1, - shards: { - rs: true, - rs0: {nodes: 1}, - rs1: {nodes: 1}, - }, - other: { - enableBalancer: false, - configOptions: {setParameter: "enableTestCommands=1"}, - shardOptions: {setParameter: "enableTestCommands=1"}, - } -}); -const mongos = st.s0; -const shard0 = st.shard0; -const shard1 = st.shard1; - -const dbName = "drinks"; - -// Create a sharded collection and distribute chunks amongst the shards. 
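// The split/moveChunk sequence below yields chunks [MinKey, 50) on shard0 and
// [50, MaxKey) on shard1, so "starbucks" (price 1000) lands on shard1 while the
// two cheaper coffees stay on shard0.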
-const coffees = [ - {_id: "americano", price: 1.5}, - {_id: "espresso", price: 2.0}, - {_id: "starbucks", price: 1000.0} -]; -const coffeeColl = mongos.getDB(dbName).getCollection("coffee"); -assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); -st.ensurePrimaryShard(dbName, shard0.shardName); -assert.commandWorked( - mongos.adminCommand({shardCollection: coffeeColl.getFullName(), key: {price: 1}})); -const splitPoint = 50.0; -assert.commandWorked( - mongos.adminCommand({split: coffeeColl.getFullName(), middle: {price: splitPoint}})); -for (let coffee of coffees) { - assert.commandWorked(coffeeColl.insert(coffee, {writeConcern: {w: "majority"}})); -} -assert.commandWorked(mongos.adminCommand({ - moveChunk: coffeeColl.getFullName(), - find: {price: 1000.0}, - to: shard1.shardName, - _waitForDelete: true -})); -assert.commandWorked(mongos.adminCommand({ - moveChunk: coffeeColl.getFullName(), - find: {price: 0.0}, - to: shard0.shardName, - _waitForDelete: true -})); - -// Create an unsharded collection and throw some data in. -const teaColl = mongos.getDB(dbName).getCollection("tea"); -const teas = - [{_id: "darjeeling", price: 2.0}, {_id: "earl gray", price: 1.5}, {_id: "sencha", price: 3.5}]; -for (let tea of teas) { - assert.commandWorked(teaColl.insert(tea, {writeConcern: {w: "majority"}})); -} - -// Run queries on both the sharded and unsharded collection. -function assertShardsHaveExpectedData() { - const dbShard0 = shard0.getDB(dbName); - const dbShard1 = shard1.getDB(dbName); - - // Assert that we can find all documents in the unsharded collection by either asking - // mongos, or consulting the primary shard directly. - assert.eq(teaColl.find().sort({_id: 1}).readConcern("majority").toArray(), - teas.sort(sortOn("_id")), - "couldn't find all unsharded data via mongos"); - assert.eq(dbShard0.tea.find().sort({_id: 1}).toArray(), - teas.sort(sortOn("_id")), - "couldn't find all unsharded data directly via primary shard"); - assert.eq(teaColl.find().sort({price: 1}).toArray(), teas.sort(sortOn("price"))); - - // Assert that we can find all documents in the sharded collection via scatter-gather. - assert.eq(coffeeColl.find().sort({_id: 1}).readConcern("majority").toArray(), - coffees.sort(sortOn("_id")), - "couldn't find all sharded data via mongos scatter-gather"); - - // Assert that we can find all documents via a query that targets multiple shards. - assert.eq(coffeeColl.find({price: {$gt: 0}}).sort({price: 1}).toArray(), - coffees.sort(sortOn("price")), - "couldn't find all sharded data via mongos multi-shard targeted query"); - - // Assert that we can find all sharded documents on shard0 by shard targeting via mongos, - // and by consulting shard0 directly. - const dataShard0 = coffees.filter(drink => drink.price < splitPoint).sort(sortOn("_id")); - assert.eq(coffeeColl.find({price: {$lt: splitPoint}}).sort({_id: 1}).toArray(), - dataShard0, - "couldn't find shard0 data via targeting through mongos"); - jsTest.log(tojson(dbShard0.getCollectionInfos())); - assert.eq(dbShard0.coffee.find().toArray(), - dataShard0, - "couldn't find shard0 data by directly asking shard0"); - - // Assert that we can find all sharded documents on shard1 by shard targeting via mongos, - // and by consulting shard1 directly. 
- const dataShard1 = coffees.filter(drink => drink.price >= splitPoint).sort(sortOn("_id")); - assert.eq(coffeeColl.find({price: {$gte: splitPoint}}).sort({_id: 1}).toArray(), - dataShard1, - "couldn't find shard1 data via targeting through mongos"); - assert.eq(dbShard1.coffee.find().toArray(), - dataShard1, - "couldn't find shard1 data by directly asking shard1"); -} -assertShardsHaveExpectedData(); - -// Run queries on the metadata stored in the config servers. -function assertConfigServersHaveExpectedData() { - const configDBViaMongos = mongos.getDB("config"); - const configDBViaConfigSvr = st.config0.getDB("config"); - const projectOnlyShard = {_id: 0, shard: 1}; - - // Assert that we can find documents for chunk metadata, both via mongos and by asking the - // config server primary directly. - const smallestChunk = {"max.price": splitPoint}; - const smallestChunkShard = {shard: "restart_catalog_sharded_cluster-rs0"}; - assert.eq(configDBViaMongos.chunks.find(smallestChunk, projectOnlyShard).toArray(), - [smallestChunkShard]); - assert.eq(configDBViaConfigSvr.chunks.find(smallestChunk, projectOnlyShard).toArray(), - [smallestChunkShard]); - - const largestChunk = {"min.price": splitPoint}; - const largestChunkShard = {shard: "restart_catalog_sharded_cluster-rs1"}; - assert.eq(configDBViaMongos.chunks.find(largestChunk, projectOnlyShard).toArray(), - [largestChunkShard]); - assert.eq(configDBViaConfigSvr.chunks.find(largestChunk, projectOnlyShard).toArray(), - [largestChunkShard]); -} -assertConfigServersHaveExpectedData(); - -// Restart the catalog on the config server primary, then assert that both collection data and -// sharding metadata are as expected. -assert.commandWorked(st.config0.getDB("admin").runCommand({restartCatalog: 1})); -assertConfigServersHaveExpectedData(); -assertShardsHaveExpectedData(); - -// Remember what indexes are present, then restart the catalog on all shards via mongos. -const teaIndexesBeforeRestart = teaColl.getIndexes().sort(sortOn("_id")); -const coffeeIndexesBeforeRestart = coffeeColl.getIndexes().sort(sortOn("_id")); -assert.commandWorked(mongos.adminCommand({restartCatalog: 1})); - -// Verify that the data in the collections and the metadata have not changed. -assertConfigServersHaveExpectedData(); -assertShardsHaveExpectedData(); - -// Verify that both the sharded and unsharded collection have the same indexes as prior to the -// restart. -const teaIndexesAfterRestart = teaColl.getIndexes().sort(sortOn("_id")); -assert.eq(teaIndexesBeforeRestart, teaIndexesAfterRestart); -const coffeeIndexesAfterRestart = coffeeColl.getIndexes().sort(sortOn("_id")); -assert.eq(coffeeIndexesBeforeRestart, coffeeIndexesAfterRestart); - -// Create new indexes on both collections and verify that queries return the same results. -[teaColl, coffeeColl].forEach(coll => { - assert.commandWorked(coll.createIndex({price: -1})); - assert.commandWorked(coll.createIndex({price: 1, _id: 1})); -}); -assertShardsHaveExpectedData(); - -// Modify the existing collections. -const validator = { - price: {$gt: 0} -}; -[teaColl, coffeeColl].forEach(coll => { - assert.commandWorked(coll.runCommand("collMod", {validator: validator})); - assert.writeErrorWithCode(coll.insert({price: -1}), ErrorCodes.DocumentValidationFailure); -}); - -// Perform another write, implicitly creating a new collection and database. 
-const secondTestDB = mongos.getDB("restart_catalog_sharded_cluster_2"); -const foodColl = secondTestDB.getCollection("food"); -const doc = { - _id: "apple", - category: "fruit" -}; -assert.commandWorked(foodColl.insert(doc)); -assert.commandWorked(foodColl.createIndex({category: 1})); -assert.eq(foodColl.find().toArray(), [doc]); - -// Shard the new collection and verify we can find its data again. -assert.commandWorked(mongos.adminCommand({enableSharding: secondTestDB.getName()})); -assert.commandWorked( - mongos.adminCommand({shardCollection: foodColl.getFullName(), key: {category: 1}})); -assert.eq(foodColl.find().toArray(), [doc]); - -// Build a new index on the new collection. -assert.commandWorked(foodColl.createIndex({category: -1})); -assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]); - -st.stop(); -}()); diff --git a/jstests/noPassthrough/rollback_wt_drop.js b/jstests/noPassthrough/rollback_wt_drop.js index 8c235695439..7e9fc7b35e2 100644 --- a/jstests/noPassthrough/rollback_wt_drop.js +++ b/jstests/noPassthrough/rollback_wt_drop.js @@ -93,9 +93,6 @@ let RollbackOps = (node) => { assert.commandWorked(mydb.createCollection(tempColl.getName())); assert.commandWorked(tempColl.insert({_id: 100, y: 100})); assert(tempColl.drop()); - - // restartCatalog should not remove drop-pending idents. - assert.commandWorked(mydb.adminCommand({restartCatalog: 1})); }; // Set up Rollback Test. diff --git a/jstests/noPassthroughWithMongod/restart_catalog_interrupts_background_validation.js b/jstests/noPassthroughWithMongod/restart_catalog_interrupts_background_validation.js deleted file mode 100644 index 99d79a847f7..00000000000 --- a/jstests/noPassthroughWithMongod/restart_catalog_interrupts_background_validation.js +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Verifies that background validation is interrupted when the `restartCatalog` command is - * executed. - * - * Only run this against WiredTiger, which supports checkpoint cursors. - * @tags: [requires_wiredtiger, requires_persistence] - */ -(function() { -"use strict"; - -const dbName = "restart_catalog_interrupts_background_validation"; -const collName = "test"; - -let testDb = db.getSiblingDB(dbName); -let testColl = testDb.getCollection(collName); -testColl.drop(); - -const setFailpoint = () => { - assert.commandWorked(testDb.adminCommand( - {configureFailPoint: "hangDuringYieldingLocksForValidation", mode: "alwaysOn"})); -}; - -const unsetFailpoint = () => { - assert.commandWorked(testDb.adminCommand( - {configureFailPoint: "hangDuringYieldingLocksForValidation", mode: "off"})); -}; - -const waitUntilFailpoint = () => { - checkLog.contains(testDb.getMongo(), - "Hanging on fail point 'hangDuringYieldingLocksForValidation'"); -}; - -const setupCollection = () => { - // Clear the log to get rid of any existing fail point logging that will be used to hang on. - assert.commandWorked(testDb.adminCommand({clearLog: 'global'})); - - assert.commandWorked(testColl.createIndex({x: 1})); - - // Insert 10,000 documents because validation will yield every 4096 entries fetched. - const docsToInsert = 10000; - var bulk = testColl.initializeUnorderedBulkOp(); - for (var i = 0; i < docsToInsert; i++) { - bulk.insert({x: i}); - } - assert.commandWorked(bulk.execute()); - - // Create a checkpoint of the data. - assert.commandWorked(testDb.fsyncLock()); - assert.commandWorked(testDb.fsyncUnlock()); -}; - -// Create an index, insert some test data and force a checkpoint. 
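// setupCollection() relies on fsyncLock()/fsyncUnlock() to force the WiredTiger
// checkpoint that the {background: true} validation's checkpoint cursor reads
// from; hence the requires_wiredtiger tag.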
-setupCollection(); - -let awaitBackgroundValidationFailed; -try { - setFailpoint(); - awaitBackgroundValidationFailed = startParallelShell(function() { - assert.commandFailedWithCode( - db.getSiblingDB("restart_catalog_interrupts_background_validation") - .runCommand({validate: "test", background: true}), - ErrorCodes.Interrupted); - }); - - waitUntilFailpoint(); - assert.commandWorked(db.adminCommand({restartCatalog: 1})); -} finally { - unsetFailpoint(); -} - -awaitBackgroundValidationFailed(); -}()); diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js index d12db6a8747..691f2146821 100644 --- a/jstests/replsets/drop_databases_two_phase.js +++ b/jstests/replsets/drop_databases_two_phase.js @@ -125,10 +125,6 @@ assert.commandFailedWithCode( ErrorCodes.DatabaseDropPending, 'collection creation should fail while we are in the process of dropping the database'); -assert.commandFailedWithCode(dbToDrop.adminCommand('restartCatalog'), - ErrorCodes.DatabaseDropPending, - 'restartCatalog should fail if any databases are marked drop-pending'); - /** * DROP DATABASE 'Database' PHASE */ diff --git a/jstests/sharding/database_versioning_all_commands.js b/jstests/sharding/database_versioning_all_commands.js index ab19abfd907..fdf8866c397 100644 --- a/jstests/sharding/database_versioning_all_commands.js +++ b/jstests/sharding/database_versioning_all_commands.js @@ -592,7 +592,6 @@ let testCases = { }, replSetGetStatus: {skip: "not supported in mongos"}, resetError: {skip: "not on a user database"}, - restartCatalog: {skip: "not on a user database"}, revokePrivilegesFromRole: {skip: "always targets the config server"}, revokeRolesFromRole: {skip: "always targets the config server"}, revokeRolesFromUser: {skip: "always targets the config server"}, diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js index 7093166e5e5..be80edd928f 100644 --- a/jstests/sharding/read_write_concern_defaults_application.js +++ b/jstests/sharding/read_write_concern_defaults_application.js @@ -522,7 +522,6 @@ let testCases = { replSetTest: {skip: "does not accept read or write concern"}, replSetUpdatePosition: {skip: "does not accept read or write concern"}, resetError: {skip: "does not accept read or write concern"}, - restartCatalog: {skip: "internal command"}, resync: {skip: "does not accept read or write concern"}, revokePrivilegesFromRole: { setUp: function(conn) { diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js index 32c3ce26dba..85f3bc6e19b 100644 --- a/jstests/sharding/safe_secondary_reads_drop_recreate.js +++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js @@ -275,7 +275,6 @@ let testCases = { replSetUpdatePosition: {skip: "does not return user data"}, replSetResizeOplog: {skip: "does not return user data"}, resetError: {skip: "does not return user data"}, - restartCatalog: {skip: "internal-only command"}, resync: {skip: "primary only"}, revokePrivilegesFromRole: {skip: "primary only"}, revokeRolesFromRole: {skip: "primary only"}, diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js index 9cf6020b056..2010ddb25c9 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js +++ 
b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js @@ -347,7 +347,6 @@ let testCases = { replSetUpdatePosition: {skip: "does not return user data"}, replSetResizeOplog: {skip: "does not return user data"}, resetError: {skip: "does not return user data"}, - restartCatalog: {skip: "internal-only command"}, resync: {skip: "primary only"}, revokePrivilegesFromRole: {skip: "primary only"}, revokeRolesFromRole: {skip: "primary only"}, diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js index 9258ba9c4f0..c7cd0d7ca89 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js @@ -282,7 +282,6 @@ let testCases = { replSetUpdatePosition: {skip: "does not return user data"}, replSetResizeOplog: {skip: "does not return user data"}, resetError: {skip: "does not return user data"}, - restartCatalog: {skip: "internal-only command"}, resync: {skip: "primary only"}, revokePrivilegesFromRole: {skip: "primary only"}, revokeRolesFromRole: {skip: "primary only"}, diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript index 74228e077eb..740d41d2bd6 100644 --- a/src/mongo/db/commands/SConscript +++ b/src/mongo/db/commands/SConscript @@ -371,7 +371,6 @@ env.Library( "oplog_note.cpp", 'read_write_concern_defaults_server_status.cpp', "resize_oplog.cpp", - "restart_catalog_command.cpp", 'rwc_defaults_commands.cpp', "set_feature_compatibility_version_command.cpp", "set_index_commit_quorum_command.cpp", diff --git a/src/mongo/db/commands/restart_catalog_command.cpp b/src/mongo/db/commands/restart_catalog_command.cpp deleted file mode 100644 index 99858abf378..00000000000 --- a/src/mongo/db/commands/restart_catalog_command.cpp +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * <http://www.mongodb.com/licensing/server-side-public-license>. - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand - -#include "mongo/platform/basic.h" - -#include <string> -#include <vector> - -#include "mongo/db/catalog/catalog_control.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/util/log.h" - -namespace mongo { -/** - * This testing-only command causes the server to close and reopen the catalog, rebuilding all - * in-memory data structures. - */ -class RestartCatalogCmd final : public BasicCommand { -public: - RestartCatalogCmd() : BasicCommand("restartCatalog") {} - - Status checkAuthForOperation(OperationContext* opCtx, - const std::string& dbname, - const BSONObj& cmdObj) const final { - // No auth checks as this is a testing-only command. - return Status::OK(); - } - - bool adminOnly() const final { - return true; - } - - bool maintenanceMode() const final { - return true; - } - - bool maintenanceOk() const final { - return false; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext*) const final { - return AllowedOnSecondary::kAlways; - } - - bool supportsWriteConcern(const BSONObj& cmd) const final { - return false; - } - - std::string help() const final { - return "restart catalog\n" - "Internal command for testing only. Closes and restores the catalog, rebuilding\n" - "in-memory data structures as needed.\n"; - } - - bool run(OperationContext* opCtx, - const std::string& dbNameUnused, - const BSONObj& cmdObj, - BSONObjBuilder& result) final { - // This command is uninterruptible; otherwise the catalog might be in an incomplete state. - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); - Lock::GlobalLock global(opCtx, MODE_X); - - // This command will fail without modifying the catalog if there are any databases that are - // marked drop-pending. (Otherwise, the Database object will be reconstructed when - // re-opening the catalog, but with the drop pending flag cleared.) 
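// This DatabaseDropPending check is the behavior exercised by the assertion
// being deleted from jstests/replsets/drop_databases_two_phase.js in this commit.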
- auto databaseHolder = DatabaseHolder::get(opCtx); - std::vector<std::string> allDbs = databaseHolder->getNames(); - for (auto&& dbName : allDbs) { - const auto db = databaseHolder->getDb(opCtx, dbName); - if (db->isDropPending(opCtx)) { - uasserted(ErrorCodes::DatabaseDropPending, - str::stream() << "cannot restart the catalog because database " << dbName - << " is pending removal"); - } - } - - log() << "Closing database catalog"; - auto state = catalog::closeCatalog(opCtx); - - log() << "Reopening database catalog"; - catalog::openCatalog(opCtx, state); - - return true; - } -}; - -MONGO_REGISTER_TEST_COMMAND(RestartCatalogCmd); - -} // namespace mongo diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp index 27a1e478fd4..31ea9a4fa45 100644 --- a/src/mongo/db/exec/requires_collection_stage.cpp +++ b/src/mongo/db/exec/requires_collection_stage.cpp @@ -72,8 +72,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() { invariant(_collection); uassert(ErrorCodes::QueryPlanKilled, - str::stream() - << "Database epoch changed due to a database-level event such as 'restartCatalog'.", + str::stream() << "Database epoch changed due to a database-level event.", getDatabaseEpoch(_collection) == _databaseEpoch); doRestoreStateRequiresCollection(); diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp index 79577d0b7df..06b33cc30d4 100644 --- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp +++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp @@ -387,30 +387,6 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase) ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled); } -TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnRestartCatalog) { - // TODO: SERVER-40588. Avoid restarting the catalog on the Biggie storage engine as it - // currently does not support this feature. - if (storageGlobalParams.engine == "biggie") { - return; - } - - auto exec = getCollscan(); - - // Partially scan the collection. - BSONObj obj; - for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); - ASSERT_EQUALS(i, obj["foo"].numberInt()); - } - - // Restart the catalog during yield. Verify that yield recovery throws with the expected error - // code. 
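// The QueryPlanKilled error asserted below comes from the database-epoch check
// in requires_collection_stage.cpp, whose message is updated earlier in this diff.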
- exec->saveState(); - BSONObj info; - ASSERT_TRUE(_client.runCommand("admin", BSON("restartCatalog" << 1), info)); - ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled); -} - TEST_F(PlanExecutorInvalidationTest, IxscanDiesWhenTruncateCollectionDropsAllIndices) { BSONObj keyPattern = BSON("foo" << 1); ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), keyPattern)); diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript index 038dfed7487..ca56afe77a1 100644 --- a/src/mongo/s/commands/SConscript +++ b/src/mongo/s/commands/SConscript @@ -77,7 +77,6 @@ env.Library( 'cluster_remove_shard_from_zone_cmd.cpp', 'cluster_repl_set_get_status_cmd.cpp', 'cluster_reset_error_cmd.cpp', - 'cluster_restart_catalog_command.cpp', 'cluster_rwc_defaults_commands.cpp', 'cluster_set_index_commit_quorum_cmd.cpp', 'cluster_set_feature_compatibility_version_cmd.cpp', diff --git a/src/mongo/s/commands/cluster_restart_catalog_command.cpp b/src/mongo/s/commands/cluster_restart_catalog_command.cpp deleted file mode 100644 index 4f3af29908c..00000000000 --- a/src/mongo/s/commands/cluster_restart_catalog_command.cpp +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * <http://www.mongodb.com/licensing/server-side-public-license>. - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/platform/basic.h" - -#include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/s/cluster_commands_helpers.h" - -namespace mongo { -namespace { - -class ClusterRestartCatalogCmd : public BasicCommand { -public: - ClusterRestartCatalogCmd() : BasicCommand("restartCatalog") {} - - std::string help() const override { - return "Internal command for testing only. Forwards the restartCatalog command to all " - "shards."; - } - - Status checkAuthForOperation(OperationContext* opCtx, - const std::string& dbname, - const BSONObj& cmdObj) const override { - // No auth checks as this is a testing-only command. 
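// No auth is required because the command could only be registered at all on a
// server started with enableTestCommands=1 (see MONGO_REGISTER_TEST_COMMAND at
// the bottom of this file).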
- return Status::OK(); - } - - bool adminOnly() const override { - return true; - } - - bool maintenanceMode() const override { - return true; - } - - bool maintenanceOk() const override { - return false; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { - return AllowedOnSecondary::kAlways; - } - - bool supportsWriteConcern(const BSONObj& cmd) const override { - return false; - } - - bool run(OperationContext* opCtx, - const std::string& db, - const BSONObj& cmdObj, - BSONObjBuilder& result) override { - auto shardResponses = scatterGatherUnversionedTargetAllShards( - opCtx, - db, - applyReadWriteConcern( - opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)), - ReadPreferenceSetting::get(opCtx), - Shard::RetryPolicy::kIdempotent); - - // Intentionally not adding the error message to 'result', as it will already contain all - // the errors from the shards in a field named 'raw'. - std::string errmsg; - return appendRawResponses(opCtx, &errmsg, &result, shardResponses); - } -}; - -MONGO_REGISTER_TEST_COMMAND(ClusterRestartCatalogCmd); - -} // namespace -} // namespace mongo |
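With both the mongod and mongos implementations deleted, any remaining caller should now receive the server's standard unknown-command error. A hedged sketch of the expected post-removal behavior from the shell (assuming nothing else registers a command under this name):

// restartCatalog is no longer registered, so the server rejects it outright.
const res = db.adminCommand({restartCatalog: 1});
assert.commandFailedWithCode(res, ErrorCodes.CommandNotFound);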