Diffstat (limited to 'jstests/noPassthrough')
-rw-r--r--  jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js |  52 -
-rw-r--r--  jstests/noPassthrough/restart_catalog_preserves_min_visible.js     |  45 -
-rw-r--r--  jstests/noPassthrough/restart_catalog_sharded_cluster.js           | 215 -
-rw-r--r--  jstests/noPassthrough/rollback_wt_drop.js                          |   3 -
4 files changed, 0 insertions, 315 deletions
diff --git a/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js b/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js
deleted file mode 100644
index 420c3446234..00000000000
--- a/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * SERVER-35671: Ensure that a database with in-progress background operations can't be closed
- * and that attempting to close it won't leave it in an inconsistent state.
- *
- * @tags: [requires_replication, uses_transactions]
- */
-
-(function() {
-"use strict";
-
-load("jstests/noPassthrough/libs/index_build.js");
-
-let replSet = new ReplSetTest({name: "server35671", nodes: 1});
-let setFailpointBool = (testDB, failpointName, alwaysOn, times) => {
- if (times) {
- return testDB.adminCommand({configureFailPoint: failpointName, mode: {"times": times}});
- } else if (alwaysOn) {
- return testDB.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn"});
- } else {
- return testDB.adminCommand({configureFailPoint: failpointName, mode: "off"});
- }
-};
-replSet.startSet();
-replSet.initiate();
-let testDB = replSet.getPrimary().getDB("test");
-// This test depends on using the IndexBuildsCoordinator to build this index, which, as of
-// SERVER-44405, will not occur in this test unless the collection is created beforehand.
-assert.commandWorked(testDB.runCommand({create: "coll"}));
-
-// Insert document into collection to avoid optimization for index creation on an empty collection.
-// This allows us to pause index builds on the collection using a fail point.
-assert.commandWorked(testDB.getCollection("coll").insert({a: 1}));
-
-setFailpointBool(testDB, "hangAfterStartingIndexBuildUnlocked", true);
-
-// The createIndex in the parallel shell blocks because of the failpoint.
-let join = startParallelShell(
- "db.getSiblingDB('test').coll.createIndex({a: 1, b: 1}, {background: true})", replSet.ports[0]);
-
-// Let the createIndex start to run.
-IndexBuildTest.waitForIndexBuildToScanCollection(testDB, "coll", "a_1_b_1");
-
-// Repeated calls should continue to fail without crashing.
-assert.commandFailed(testDB.adminCommand({restartCatalog: 1}));
-assert.commandFailed(testDB.adminCommand({restartCatalog: 1}));
-assert.commandFailed(testDB.adminCommand({restartCatalog: 1}));
-
-// Unset failpoint so we can join the parallel shell.
-setFailpointBool(testDB, "hangAfterStartingIndexBuildUnlocked", false);
-join();
-replSet.stopSet();
-})();
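
The helper in the deleted test wraps the standard configureFailPoint pattern for pausing an index build. Below is a minimal sketch of that pattern, not the deleted test itself; it assumes `conn` is a connection returned by MongoRunner.runMongod (or a replica-set primary) started with test commands enabled, and that "test.coll" exists and is non-empty.

// Minimal sketch: pause an index build with a failpoint, then release it.
// Assumes `conn` and a non-empty "test.coll", as described above.
const admin = conn.getDB("admin");

// Hang index builds shortly after they start.
assert.commandWorked(admin.runCommand(
    {configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "alwaysOn"}));

// The createIndex in the parallel shell blocks until the failpoint is released.
const awaitBuild = startParallelShell(
    "db.getSiblingDB('test').coll.createIndex({a: 1});", conn.port);

// ... exercise behavior that requires an in-progress index build here ...

// Release the failpoint, then join the parallel shell.
assert.commandWorked(admin.runCommand(
    {configureFailPoint: "hangAfterStartingIndexBuildUnlocked", mode: "off"}));
awaitBuild();
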
diff --git a/jstests/noPassthrough/restart_catalog_preserves_min_visible.js b/jstests/noPassthrough/restart_catalog_preserves_min_visible.js
deleted file mode 100644
index 18127ce27d1..00000000000
--- a/jstests/noPassthrough/restart_catalog_preserves_min_visible.js
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Restarting the catalog will destroy and recreate all database/collection/index objects from the
- * storage engine state. However, some fields like the `minimumVisibleSnapshot` timestamp are not
- * persisted to storage. This value is typically rehydrated when performing replication
- * recovery. However, there are cases where reads can be at a timestamp prior to where replication
- * recovery begins. Those are fixed by copying the previous value over from the destroyed catalog
- * object to the recreated one.
- *
- * This test verifies the collection's minimum visible snapshot timestamp is appropriately copied
- * over.
- *
- * @tags: [requires_replication]
- */
-(function() {
-"use strict";
-
-let replSet = new ReplSetTest({name: "server35317", nodes: 1});
-replSet.startSet();
-replSet.initiate();
-
-let prim = replSet.getPrimary();
-let beforeIndexBuild = assert.commandWorked(prim.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn"
-}))["operationTime"];
-assert.commandWorked(prim.getDB("test").coll.insert({c: 1}));
-assert.commandWorked(prim.getDB("test").coll.createIndex({c: 1}));
-assert.commandWorked(prim.adminCommand({restartCatalog: 1}));
-
-let session = prim.startSession({causalConsistency: false});
-let sessionDb = session.getDatabase("test");
-// Prior to fixing SERVER-35317, this would crash a debug build, or return success on a
-// non-debug build. Now it should return an error. Specifically, this fails because we're
-// trying to read behind the minimum visible snapshot timestamp for the `test.coll`
-// collection.
-assert.commandFailed(sessionDb.runCommand({
- find: "coll",
- filter: {c: 1},
- readConcern: {level: "snapshot", atClusterTime: beforeIndexBuild},
- txnNumber: NumberLong(0)
-}));
-
-session.endSession();
-replSet.stopSet();
-})();
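
The failing read in that test is an ordinary snapshot find pinned to an earlier cluster time. A minimal sketch of the command shape, mirroring the deleted test; `prim` (a replica-set primary) and `earlierOpTime` (an `operationTime` captured from a previous command) are placeholders.

// Minimal sketch: a snapshot read at a fixed, earlier cluster time.
// Reading behind a collection's minimum visible snapshot is expected to fail.
const session = prim.startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");
assert.commandFailed(sessionDb.runCommand({
    find: "coll",
    readConcern: {level: "snapshot", atClusterTime: earlierOpTime},
    txnNumber: NumberLong(0)
}));
session.endSession();
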
diff --git a/jstests/noPassthrough/restart_catalog_sharded_cluster.js b/jstests/noPassthrough/restart_catalog_sharded_cluster.js
deleted file mode 100644
index 782fa9aa913..00000000000
--- a/jstests/noPassthrough/restart_catalog_sharded_cluster.js
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Tests restarting the catalog in a sharded cluster on the config server and the shards.
- * @tags: [requires_replication, requires_sharding, requires_majority_read_concern]
- */
-(function() {
-"use strict";
-
-// Only run this test if the storage engine is "wiredTiger" or "inMemory".
-const acceptedStorageEngines = ["wiredTiger", "inMemory"];
-const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger";
-if (!acceptedStorageEngines.includes(currentStorageEngine)) {
- jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine +
- " storage engine");
- return;
-}
-
-// Helper function for sorting documents in JavaScript.
-function sortOn(fieldName) {
- return (doc1, doc2) => {
- return bsonWoCompare({_: doc1[fieldName]}, {_: doc2[fieldName]});
- };
-}
-
-const st = new ShardingTest({
- name: "restart_catalog_sharded_cluster",
- mongos: 1,
- config: 1,
- shards: {
- rs: true,
- rs0: {nodes: 1},
- rs1: {nodes: 1},
- },
- other: {
- enableBalancer: false,
- configOptions: {setParameter: "enableTestCommands=1"},
- shardOptions: {setParameter: "enableTestCommands=1"},
- }
-});
-const mongos = st.s0;
-const shard0 = st.shard0;
-const shard1 = st.shard1;
-
-const dbName = "drinks";
-
-// Create a sharded collection and distribute chunks amongst the shards.
-const coffees = [
- {_id: "americano", price: 1.5},
- {_id: "espresso", price: 2.0},
- {_id: "starbucks", price: 1000.0}
-];
-const coffeeColl = mongos.getDB(dbName).getCollection("coffee");
-assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, shard0.shardName);
-assert.commandWorked(
- mongos.adminCommand({shardCollection: coffeeColl.getFullName(), key: {price: 1}}));
-const splitPoint = 50.0;
-assert.commandWorked(
- mongos.adminCommand({split: coffeeColl.getFullName(), middle: {price: splitPoint}}));
-for (let coffee of coffees) {
- assert.commandWorked(coffeeColl.insert(coffee, {writeConcern: {w: "majority"}}));
-}
-assert.commandWorked(mongos.adminCommand({
- moveChunk: coffeeColl.getFullName(),
- find: {price: 1000.0},
- to: shard1.shardName,
- _waitForDelete: true
-}));
-assert.commandWorked(mongos.adminCommand({
- moveChunk: coffeeColl.getFullName(),
- find: {price: 0.0},
- to: shard0.shardName,
- _waitForDelete: true
-}));
-
-// Create an unsharded collection and throw some data in.
-const teaColl = mongos.getDB(dbName).getCollection("tea");
-const teas =
- [{_id: "darjeeling", price: 2.0}, {_id: "earl gray", price: 1.5}, {_id: "sencha", price: 3.5}];
-for (let tea of teas) {
- assert.commandWorked(teaColl.insert(tea, {writeConcern: {w: "majority"}}));
-}
-
-// Run queries on both the sharded and unsharded collection.
-function assertShardsHaveExpectedData() {
- const dbShard0 = shard0.getDB(dbName);
- const dbShard1 = shard1.getDB(dbName);
-
- // Assert that we can find all documents in the unsharded collection either by asking
- // mongos or by consulting the primary shard directly.
- assert.eq(teaColl.find().sort({_id: 1}).readConcern("majority").toArray(),
- teas.sort(sortOn("_id")),
- "couldn't find all unsharded data via mongos");
- assert.eq(dbShard0.tea.find().sort({_id: 1}).toArray(),
- teas.sort(sortOn("_id")),
- "couldn't find all unsharded data directly via primary shard");
- assert.eq(teaColl.find().sort({price: 1}).toArray(), teas.sort(sortOn("price")));
-
- // Assert that we can find all documents in the sharded collection via scatter-gather.
- assert.eq(coffeeColl.find().sort({_id: 1}).readConcern("majority").toArray(),
- coffees.sort(sortOn("_id")),
- "couldn't find all sharded data via mongos scatter-gather");
-
- // Assert that we can find all documents via a query that targets multiple shards.
- assert.eq(coffeeColl.find({price: {$gt: 0}}).sort({price: 1}).toArray(),
- coffees.sort(sortOn("price")),
- "couldn't find all sharded data via mongos multi-shard targeted query");
-
- // Assert that we can find all sharded documents on shard0 by shard targeting via mongos,
- // and by consulting shard0 directly.
- const dataShard0 = coffees.filter(drink => drink.price < splitPoint).sort(sortOn("_id"));
- assert.eq(coffeeColl.find({price: {$lt: splitPoint}}).sort({_id: 1}).toArray(),
- dataShard0,
- "couldn't find shard0 data via targeting through mongos");
- jsTest.log(tojson(dbShard0.getCollectionInfos()));
- assert.eq(dbShard0.coffee.find().toArray(),
- dataShard0,
- "couldn't find shard0 data by directly asking shard0");
-
- // Assert that we can find all sharded documents on shard1 by shard targeting via mongos,
- // and by consulting shard1 directly.
- const dataShard1 = coffees.filter(drink => drink.price >= splitPoint).sort(sortOn("_id"));
- assert.eq(coffeeColl.find({price: {$gte: splitPoint}}).sort({_id: 1}).toArray(),
- dataShard1,
- "couldn't find shard1 data via targeting through mongos");
- assert.eq(dbShard1.coffee.find().toArray(),
- dataShard1,
- "couldn't find shard1 data by directly asking shard1");
-}
-assertShardsHaveExpectedData();
-
-// Run queries on the metadata stored in the config servers.
-function assertConfigServersHaveExpectedData() {
- const configDBViaMongos = mongos.getDB("config");
- const configDBViaConfigSvr = st.config0.getDB("config");
- const projectOnlyShard = {_id: 0, shard: 1};
-
- // Assert that we can find documents for chunk metadata, both via mongos and by asking the
- // config server primary directly.
- const smallestChunk = {"max.price": splitPoint};
- const smallestChunkShard = {shard: "restart_catalog_sharded_cluster-rs0"};
- assert.eq(configDBViaMongos.chunks.find(smallestChunk, projectOnlyShard).toArray(),
- [smallestChunkShard]);
- assert.eq(configDBViaConfigSvr.chunks.find(smallestChunk, projectOnlyShard).toArray(),
- [smallestChunkShard]);
-
- const largestChunk = {"min.price": splitPoint};
- const largestChunkShard = {shard: "restart_catalog_sharded_cluster-rs1"};
- assert.eq(configDBViaMongos.chunks.find(largestChunk, projectOnlyShard).toArray(),
- [largestChunkShard]);
- assert.eq(configDBViaConfigSvr.chunks.find(largestChunk, projectOnlyShard).toArray(),
- [largestChunkShard]);
-}
-assertConfigServersHaveExpectedData();
-
-// Restart the catalog on the config server primary, then assert that both collection data and
-// sharding metadata are as expected.
-assert.commandWorked(st.config0.getDB("admin").runCommand({restartCatalog: 1}));
-assertConfigServersHaveExpectedData();
-assertShardsHaveExpectedData();
-
-// Remember what indexes are present, then restart the catalog on all shards via mongos.
-const teaIndexesBeforeRestart = teaColl.getIndexes().sort(sortOn("name"));
-const coffeeIndexesBeforeRestart = coffeeColl.getIndexes().sort(sortOn("name"));
-assert.commandWorked(mongos.adminCommand({restartCatalog: 1}));
-
-// Verify that the data in the collections and the metadata have not changed.
-assertConfigServersHaveExpectedData();
-assertShardsHaveExpectedData();
-
-// Verify that both the sharded and unsharded collection have the same indexes as prior to the
-// restart.
-const teaIndexesAfterRestart = teaColl.getIndexes().sort(sortOn("name"));
-assert.eq(teaIndexesBeforeRestart, teaIndexesAfterRestart);
-const coffeeIndexesAfterRestart = coffeeColl.getIndexes().sort(sortOn("name"));
-assert.eq(coffeeIndexesBeforeRestart, coffeeIndexesAfterRestart);
-
-// Create new indexes on both collections and verify that queries return the same results.
-[teaColl, coffeeColl].forEach(coll => {
- assert.commandWorked(coll.createIndex({price: -1}));
- assert.commandWorked(coll.createIndex({price: 1, _id: 1}));
-});
-assertShardsHaveExpectedData();
-
-// Modify the existing collections.
-const validator = {
- price: {$gt: 0}
-};
-[teaColl, coffeeColl].forEach(coll => {
- assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
- assert.writeErrorWithCode(coll.insert({price: -1}), ErrorCodes.DocumentValidationFailure);
-});
-
-// Perform another write, implicitly creating a new collection and database.
-const secondTestDB = mongos.getDB("restart_catalog_sharded_cluster_2");
-const foodColl = secondTestDB.getCollection("food");
-const doc = {
- _id: "apple",
- category: "fruit"
-};
-assert.commandWorked(foodColl.insert(doc));
-assert.commandWorked(foodColl.createIndex({category: 1}));
-assert.eq(foodColl.find().toArray(), [doc]);
-
-// Shard the new collection and verify we can find its data again.
-assert.commandWorked(mongos.adminCommand({enableSharding: secondTestDB.getName()}));
-assert.commandWorked(
- mongos.adminCommand({shardCollection: foodColl.getFullName(), key: {category: 1}}));
-assert.eq(foodColl.find().toArray(), [doc]);
-
-// Build a new index on the new collection.
-assert.commandWorked(foodColl.createIndex({category: -1}));
-assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
-
-st.stop();
-}());
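
Most of that deleted test is assertions; the setup itself is the usual enableSharding / shardCollection / split / moveChunk sequence. A minimal sketch of that sequence, assuming `st` is a ShardingTest with at least two shards; the namespace, shard key, and split point mirror the deleted test.

// Minimal sketch of the shard/split/move setup used by the deleted test.
const mongos = st.s0;
const ns = "drinks.coffee";
assert.commandWorked(mongos.adminCommand({enableSharding: "drinks"}));
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {price: 1}}));
// Split into two chunks at price 50, then place the upper chunk on shard1.
assert.commandWorked(mongos.adminCommand({split: ns, middle: {price: 50}}));
assert.commandWorked(mongos.adminCommand(
    {moveChunk: ns, find: {price: 1000}, to: st.shard1.shardName, _waitForDelete: true}));
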
diff --git a/jstests/noPassthrough/rollback_wt_drop.js b/jstests/noPassthrough/rollback_wt_drop.js
index 8c235695439..7e9fc7b35e2 100644
--- a/jstests/noPassthrough/rollback_wt_drop.js
+++ b/jstests/noPassthrough/rollback_wt_drop.js
@@ -93,9 +93,6 @@ let RollbackOps = (node) => {
assert.commandWorked(mydb.createCollection(tempColl.getName()));
assert.commandWorked(tempColl.insert({_id: 100, y: 100}));
assert(tempColl.drop());
-
- // restartCatalog should not remove drop-pending idents.
- assert.commandWorked(mydb.adminCommand({restartCatalog: 1}));
};
// Set up Rollback Test.