author    Maria van Keulen <maria@mongodb.com>    2018-07-27 17:09:24 -0400
committer Maria van Keulen <maria@mongodb.com>    2018-08-07 14:26:55 -0400
commit    88a8b7b31ca41eca88e782d84e496911c93cc0ae (patch)
tree      f60e0bee8e351180ad2c256c3c8946ec35fadeb6 /jstests
parent    4c16f0f336f4db77034e8aa594bbd4a5bac3f40c (diff)
download  mongo-88a8b7b31ca41eca88e782d84e496911c93cc0ae.tar.gz
SERVER-36257 Remove copyDB and clone commands
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/auth/copyauth.js | 275
-rw-r--r--  jstests/auth/copyauth2.js | 44
-rw-r--r--  jstests/auth/copyauth_between_shards.js | 60
-rw-r--r--  jstests/auth/lib/commands_lib.js | 20
-rw-r--r--  jstests/auth/localhostAuthBypass.js | 5
-rw-r--r--  jstests/core/auth_copydb.js | 26
-rw-r--r--  jstests/core/bypass_doc_validation.js | 16
-rw-r--r--  jstests/core/collation.js | 35
-rw-r--r--  jstests/core/commands_namespace_parsing.js | 7
-rw-r--r--  jstests/core/copydb.js | 28
-rw-r--r--  jstests/core/system_profile.js | 11
-rw-r--r--  jstests/core/views/views_all_commands.js | 2
-rw-r--r--  jstests/libs/override_methods/auto_retry_on_network_error.js | 1
-rw-r--r--  jstests/libs/override_methods/set_read_and_write_concerns.js | 1
-rw-r--r--  jstests/noPassthrough/copydb_illegal_collections.js | 29
-rw-r--r--  jstests/noPassthrough/index_version_autoupgrade.js | 23
-rw-r--r--  jstests/noPassthroughWithMongod/server7428.js | 24
-rw-r--r--  jstests/replsets/cloneDb.js | 128
-rw-r--r--  jstests/replsets/copydb.js | 97
-rw-r--r--  jstests/replsets/localhostAuthBypass.js | 5
-rw-r--r--  jstests/sharding/auth_copydb.js | 49
-rw-r--r--  jstests/sharding/copydb_from_mongos.js | 22
-rw-r--r--  jstests/sharding/database_and_shard_versioning_all_commands.js | 28
-rw-r--r--  jstests/sharding/libs/last_stable_mongos_commands.js | 16
-rw-r--r--  jstests/sharding/localhostAuthBypass.js | 3
-rw-r--r--  jstests/sharding/printShardingStatus.js | 1
-rw-r--r--  jstests/sharding/safe_secondary_reads_drop_recreate.js | 10
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js | 10
-rw-r--r--  jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js | 10
29 files changed, 48 insertions, 938 deletions
diff --git a/jstests/auth/copyauth.js b/jstests/auth/copyauth.js
deleted file mode 100644
index 9caa51a6e87..00000000000
--- a/jstests/auth/copyauth.js
+++ /dev/null
@@ -1,275 +0,0 @@
-// Test copyDatabase command with various combinations of authed/unauthed and single node/replica
-// set source and dest.
-
-TestData.authMechanism = "SCRAM-SHA-1"; // SERVER-11428
-DB.prototype._defaultAuthenticationMechanism = "SCRAM-SHA-1"; // SERVER-11428
-
-// We turn off gossiping the mongo shell's clusterTime because this test connects to replica sets
-// and sharded clusters as a user other than __system. Attempting to advance the clusterTime while
-// it has been signed with a dummy key results in an authorization error.
-TestData.skipGossipingClusterTime = true;
-
-var baseName = "jstests_clone_copyauth";
-
-/*
- * Helper to spawn a replica set, sharded cluster, or a single mongod and hide it all behind the
- * same interface.
- *
- * Arguments:
- *
- * clusterType - type of cluster to start. Options are "sharded", "repl", or "single".
- * startWithAuth - whether to start the cluster with authentication.
- * startWithTransitionToAuth - whether to start the cluster with --transitionToAuth (startWithAuth
- * must also be true).
- *
- * Member variables:
- *
- * conn - a connection to the node used to access this cluster, whether it's the mongod, a primary
- * mongod in a replica set, or a mongos.
- * connString - the full connection string used to connect to this cluster. For a replica set this
- * is the full connection string including the replica set name.
- *
- * Member functions:
- *
- * stop() - stop and cleanup whatever nodes the helper spawned when it was created.
- * @tags: [requires_replication, requires_sharding]
- */
-function ClusterSpawnHelper(clusterType, startWithAuth, startWithTransitionToAuth) {
- var singleNodeConfig = {};
- if (startWithAuth) {
- singleNodeConfig.keyFile = "jstests/libs/key1";
- if (startWithTransitionToAuth) {
- singleNodeConfig.transitionToAuth = "";
- }
- }
- if (clusterType === "sharded") {
- var shardingTestConfig = {
- name: baseName + "_source",
- keyFile: singleNodeConfig.keyFile,
- mongos: [singleNodeConfig],
- shards: [singleNodeConfig],
- config: [singleNodeConfig]
- };
- var shardingTest = new ShardingTest(shardingTestConfig);
- this.conn = shardingTest.s;
- this.connString = this.conn.host;
- } else if (clusterType === "repl") {
- var replSetTestConfig = {
- name: baseName + "_source",
- nodes: 3,
- nodeOptions: singleNodeConfig,
- keyFile: singleNodeConfig.keyFile
- };
- var replSetTest = new ReplSetTest(replSetTestConfig);
- replSetTest.startSet();
- replSetTest.initiate();
- if (startWithAuth) {
- authutil.asCluster(
- replSetTest.nodes, replSetTestConfig.nodeOptions.keyFile, function() {
- replSetTest.awaitReplication();
- });
- } else {
- replSetTest.awaitReplication();
- }
- this.conn = replSetTest.getPrimary();
- this.connString = replSetTest.getURL();
- } else {
- this.conn = MongoRunner.runMongod(singleNodeConfig);
- this.connString = this.conn.host;
- }
-
- this.stop = function() {
- if (clusterType === "sharded") {
- shardingTest.stop();
- } else if (clusterType === "repl") {
- replSetTest.stopSet();
- } else {
- MongoRunner.stopMongod(this.conn);
- }
- };
-}
-
-/*
- * Helper to test the running the "copydb" command between various kinds of clusters and various
- * combinations of authentication on the source and target.
- *
- * @param {Object} configObj
- *
- * {
- * sourceClusterType {string}: Type of cluster to use as the source of the copy. Options are
- * "single", "repl", "sharded".
- * isSourceUsingAuth {bool}: Whether to use auth in the source cluster for the copy.
- * targetClusterType {string}: Type of cluster to use as the target of the copy. Options are
- * "single", "repl", "sharded".
- * isTargetUsingAuth {bool}: Whether to use auth in the target cluster for the copy.
- * }
- */
-function copydbBetweenClustersTest(configObj) {
- // First sanity check the arguments in our configObj
- var requiredKeys = [
- 'sourceClusterType',
- 'isSourceUsingAuth',
- 'targetClusterType',
- 'isTargetUsingAuth',
- 'isSourceUsingTransitionToAuth',
- 'isTargetUsingTransitionToAuth'
- ];
-
- var i;
- for (i = 0; i < requiredKeys.length; i++) {
- assert(configObj.hasOwnProperty(requiredKeys[i]),
- "Missing required key: " + requiredKeys[i] + " in config object");
- }
-
- // 1. Get a connection to the source database, insert data and setup auth if applicable
- source = new ClusterSpawnHelper(configObj.sourceClusterType,
- configObj.isSourceUsingAuth,
- configObj.isSourceUsingTransitionToAuth);
-
- if (configObj.isSourceUsingAuth) {
- // Create a super user so we can create a regular user and not be locked out afterwards
- source.conn.getDB("admin").createUser(
- {user: "sourceSuperUser", pwd: "sourceSuperUser", roles: ["root"]});
- source.conn.getDB("admin").auth("sourceSuperUser", "sourceSuperUser");
-
- source.conn.getDB(baseName)[baseName].save({i: 1});
- assert.eq(1, source.conn.getDB(baseName)[baseName].count());
- assert.eq(1, source.conn.getDB(baseName)[baseName].findOne().i);
-
- // Insert a document and create a regular user that we will use for the target
- // authenticating with the source
- source.conn.getDB(baseName).createUser({user: "foo", pwd: "bar", roles: ["dbOwner"]});
-
- source.conn.getDB("admin").logout();
-
- var readWhenLoggedOut = function() {
- source.conn.getDB(baseName)[baseName].findOne();
- };
- if (configObj.isSourceUsingTransitionToAuth) {
- // transitionToAuth does not turn on access control
- assert.doesNotThrow(readWhenLoggedOut);
- } else {
- assert.throws(readWhenLoggedOut);
- }
- } else {
- source.conn.getDB(baseName)[baseName].save({i: 1});
- assert.eq(1, source.conn.getDB(baseName)[baseName].count());
- assert.eq(1, source.conn.getDB(baseName)[baseName].findOne().i);
- }
-
- // 2. Get a connection to the target database, and set up auth if necessary
- target = new ClusterSpawnHelper(configObj.targetClusterType,
- configObj.isTargetUsingAuth,
- configObj.isTargetUsingTransitionToAuth);
-
- if (configObj.isTargetUsingAuth) {
- target.conn.getDB("admin").createUser(
- {user: "targetSuperUser", pwd: "targetSuperUser", roles: ["root"]});
-
- var readWhenLoggedOut = function() {
- target.conn.getDB(baseName)[baseName].findOne();
- };
- if (configObj.isTargetUsingTransitionToAuth) {
- // transitionToAuth does not turn on access control
- assert.doesNotThrow(readWhenLoggedOut);
- } else {
- assert.throws(readWhenLoggedOut);
- }
-
- target.conn.getDB("admin").auth("targetSuperUser", "targetSuperUser");
- }
-
- // 3. Run the copydb command
- target.conn.getDB(baseName).dropDatabase();
- assert.eq(0, target.conn.getDB(baseName)[baseName].count());
- if (configObj.isSourceUsingAuth) {
- // We only need to pass username and password if the target has to send authentication
- // information to the source cluster
- assert.commandWorked(target.conn.getDB(baseName).copyDatabase(
- baseName, baseName, source.connString, "foo", "bar"));
- } else {
- // We are copying from a cluster with no auth
- assert.commandWorked(
- target.conn.getDB(baseName).copyDatabase(baseName, baseName, source.connString));
- }
- assert.eq(1, target.conn.getDB(baseName)[baseName].count());
- assert.eq(1, target.conn.getDB(baseName)[baseName].findOne().i);
-
- if (configObj.isTargetUsingAuth) {
- target.conn.getDB("admin").logout();
- }
-
- // 4. Do any necessary cleanup
- source.stop();
- target.stop();
-}
-
-(function() {
- "use strict";
-
- var sourceClusterTypeValues = ["single", "repl", "sharded"];
- var isSourceUsingAuthValues = [true, false];
- var isSourceUsingTransitionToAuthValues = [true, false];
- var targetClusterTypeValues = ["single", "repl", "sharded"];
- var isTargetUsingAuthValues = [true, false];
- var isTargetUsingTransitionToAuthValues = [true, false];
- for (var i = 0; i < sourceClusterTypeValues.length; i++) {
- for (var j = 0; j < isSourceUsingAuthValues.length; j++) {
- for (var k = 0; k < targetClusterTypeValues.length; k++) {
- for (var l = 0; l < isTargetUsingAuthValues.length; l++) {
- if (sourceClusterTypeValues[i] === "sharded" &&
- targetClusterTypeValues[k] === "sharded") {
- // SERVER-13112
- continue;
- }
- if (sourceClusterTypeValues[i] === "repl" &&
- targetClusterTypeValues[k] === "repl") {
- // SERVER-13077
- continue;
- }
- if (isSourceUsingAuthValues[j] === true &&
- targetClusterTypeValues[k] === "sharded") {
- // SERVER-6427
- continue;
- }
- if (sourceClusterTypeValues[i] === "repl" &&
- isSourceUsingAuthValues[j] === false &&
- targetClusterTypeValues[k] === "sharded" &&
- isTargetUsingAuthValues[l] === true) {
- // SERVER-18103
- continue;
- }
-
- for (var m = 0; m < isSourceUsingTransitionToAuthValues.length; m++) {
- if (isSourceUsingTransitionToAuthValues[m] === true &&
- isSourceUsingAuthValues[j] === false) {
- // transitionToAuth requires auth parameters
- continue;
- }
- for (var n = 0; n < isTargetUsingTransitionToAuthValues.length; n++) {
- if (isTargetUsingTransitionToAuthValues[n] === true &&
- isTargetUsingAuthValues[l] === false) {
- // transitionToAuth requires auth parameters
- continue;
- }
- var testCase = {
- 'sourceClusterType': sourceClusterTypeValues[i],
- 'isSourceUsingAuth': isSourceUsingAuthValues[j],
- 'targetClusterType': targetClusterTypeValues[k],
- 'isTargetUsingAuth': isTargetUsingAuthValues[l],
- 'isSourceUsingTransitionToAuth':
- isSourceUsingTransitionToAuthValues[m],
- 'isTargetUsingTransitionToAuth':
- isTargetUsingTransitionToAuthValues[n]
- };
- print("Running copydb with auth test:");
- printjson(testCase);
- copydbBetweenClustersTest(testCase);
- }
- }
- }
- }
- }
- }
-}());
-print(baseName + " success!");
diff --git a/jstests/auth/copyauth2.js b/jstests/auth/copyauth2.js
deleted file mode 100644
index 3355e6d67d1..00000000000
--- a/jstests/auth/copyauth2.js
+++ /dev/null
@@ -1,44 +0,0 @@
-// Basic test that copydb works with auth enabled when copying within the same cluster
-// @tags: [requires_sharding]
-
-function runTest(a, b) {
- a.createUser({user: "chevy", pwd: "chase", roles: ["read", {role: 'readWrite', db: b._name}]});
- a.foo.insert({a: 1});
- b.getSiblingDB("admin").logout();
-
- a.auth("chevy", "chase");
-
- assert.eq(1, a.foo.count(), "A");
- assert.eq(0, b.foo.count(), "B");
-
- a.copyDatabase(a._name, b._name);
- assert.eq(1, a.foo.count(), "C");
- assert.eq(1, b.foo.count(), "D");
-}
-
-// run all tests standalone
-var conn = MongoRunner.runMongod({auth: ""});
-var a = conn.getDB("copydb2-test-a");
-var b = conn.getDB("copydb2-test-b");
-var adminDB = conn.getDB("admin");
-adminDB.createUser({user: "root", pwd: "root", roles: ["root"]});
-adminDB.auth("root", "root");
-runTest(a, b);
-MongoRunner.stopMongod(conn);
-
-/** Doesn't work in a sharded setup due to SERVER-13080
-// run all tests sharded
-var st = new ShardingTest({
- shards: 2,
- mongos: 1,
- keyFile: "jstests/libs/key1",
-});
-var a = st.s.getDB( "copydb2-test-a" );
-var b = st.s.getDB( "copydb2-test-b" );
-st.s.getDB( "admin" ).createUser({user: "root", pwd: "root", roles: ["root"]});
-st.s.getDB( "admin" ).auth("root", "root");
-runTest(a, b);
-st.stop();
-*/
-
-print("Successfully completed copyauth2.js test.");
diff --git a/jstests/auth/copyauth_between_shards.js b/jstests/auth/copyauth_between_shards.js
deleted file mode 100644
index aed4a6847a0..00000000000
--- a/jstests/auth/copyauth_between_shards.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Test copyDatabase command inside a sharded cluster with and without auth. Tests with auth are
-// currently disabled due to SERVER-13080.
-// @tags: [requires_sharding, requires_replication]
-
-var baseName = "jstests_clone_copyauth_between_shards";
-
-function copydbWithinShardedCluster(useReplSets, passCredentials, useAuth) {
- var clusterConfig = {shards: 1, mongos: 1, config: 1};
-
- if (useAuth) {
- clusterConfig.auth = "";
- clusterConfig.keyFile = "jstests/libs/key1";
- }
-
- if (useReplSets) {
- clusterConfig.rs = {};
- }
- var st = new ShardingTest(clusterConfig);
-
- var mongos = st.s;
-
- var test1 = mongos.getDB('test1');
- var test2 = mongos.getDB('test2');
-
- if (useAuth) {
- mongos.getDB("admin").createUser({user: "super", pwd: "super", roles: ["root"]});
- assert.throws(function() {
- mongos.getDB("test1")["test1"].findOne();
- });
- mongos.getDB("admin").auth("super", "super");
- }
-
- test1.getCollection('test').insert({foo: 'bar'});
- jsTestLog('Test document on source db:');
- printjson(test1.getCollection('test').findOne());
- jsTestLog('copydb');
-
- // The copyDatabase command acts differently depending on whether we pass username and password
- if (passCredentials) {
- var result =
- mongos.getDB('admin').copyDatabase('test1', 'test2', undefined, "super", "super");
- } else {
- var result = mongos.getDB('admin').copyDatabase('test1', 'test2');
- }
- printjson(result);
- assert.eq(result.ok, 1.0);
- jsTestLog('Test document on destination db:');
- printjson(test2.getCollection('test').findOne());
- st.stop();
-}
-
-// SERVER-13080
-// copydbWithinShardedCluster(true, true, true);
-// copydbWithinShardedCluster(false, true, true);
-// copydbWithinShardedCluster(true, false, true);
-// copydbWithinShardedCluster(false, false, true);
-copydbWithinShardedCluster(true, false, false);
-copydbWithinShardedCluster(false, false, false);
-
-print(baseName + " success!");
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index ed7ae60138b..f3635477545 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -2291,26 +2291,6 @@ var authCommandsLib = {
]
},
{
- testname: "copydb",
- command: {copydb: 1, fromdb: firstDbName, todb: secondDbName},
- skipSharded: true, // Does not work sharded due to SERVER-13080
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
- privileges: [
- {resource: {db: firstDbName, collection: ""}, actions: ["find"]},
- {resource: {db: firstDbName, collection: "system.js"}, actions: ["find"]},
- {
- resource: {db: secondDbName, collection: ""},
- actions: ["insert", "createIndex"]
- },
- {resource: {db: secondDbName, collection: "system.js"}, actions: ["insert"]},
- ]
- },
- ]
- },
- {
testname: "createRole_authenticationRestrictions",
command: {
createRole: "testRole",
diff --git a/jstests/auth/localhostAuthBypass.js b/jstests/auth/localhostAuthBypass.js
index e3bb03b661d..f68ed6dfba4 100644
--- a/jstests/auth/localhostAuthBypass.js
+++ b/jstests/auth/localhostAuthBypass.js
@@ -54,11 +54,8 @@ var assertCannotRunCommands = function(mongo) {
assert.throws(function() {
mongo.getDB("test").createUser({user: username, pwd: password, roles: ['readWrite']});
});
- // DB operations
- var authorizeErrorCode = 13;
- assert.commandFailedWithCode(
- mongo.getDB("test").copyDatabase("admin", "admin2"), authorizeErrorCode, "copyDatabase");
// Create collection
+ var authorizeErrorCode = 13;
assert.commandFailedWithCode(
mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
authorizeErrorCode,
diff --git a/jstests/core/auth_copydb.js b/jstests/core/auth_copydb.js
deleted file mode 100644
index b8a85ae1c03..00000000000
--- a/jstests/core/auth_copydb.js
+++ /dev/null
@@ -1,26 +0,0 @@
-// @tags: [
-// requires_non_retryable_commands,
-// requires_fastcount,
-// requires_auth,
-// assumes_write_concern_unchanged
-// ]
-
-a = db.getSisterDB("copydb2-test-a");
-b = db.getSisterDB("copydb2-test-b");
-
-a.dropDatabase();
-b.dropDatabase();
-a.dropAllUsers();
-b.dropAllUsers();
-
-a.foo.save({a: 1});
-
-a.createUser({user: "chevy", pwd: "chase", roles: jsTest.basicUserRoles});
-
-assert.eq(1, a.foo.count(), "A");
-assert.eq(0, b.foo.count(), "B");
-
-// SERVER-727
-a.copyDatabase(a._name, b._name, "", "chevy", "chase");
-assert.eq(1, a.foo.count(), "C");
-assert.eq(1, b.foo.count(), "D");
diff --git a/jstests/core/bypass_doc_validation.js b/jstests/core/bypass_doc_validation.js
index d05d20cf2ca..b3290ef4e04 100644
--- a/jstests/core/bypass_doc_validation.js
+++ b/jstests/core/bypass_doc_validation.js
@@ -5,7 +5,6 @@
*
* - aggregation with $out
* - applyOps (when not sharded)
- * - copyDb
* - doTxn (when not sharded)
* - findAndModify
* - insert
@@ -85,19 +84,6 @@
coll.aggregate(pipeline, {bypassDocumentValidation: true});
assert.eq(1, outputColl.count({aggregation: 1}));
- // Test the copyDb command.
- const copyDbName = dbName + '_copy';
- const copyDb = myDb.getSiblingDB(copyDbName);
- assert.commandWorked(copyDb.dropDatabase());
- let res = db.adminCommand(
- {copydb: 1, fromdb: dbName, todb: copyDbName, bypassDocumentValidation: false});
- assertFailsValidation(res);
- assert.eq(0, copyDb[collName].count());
- assert.commandWorked(copyDb.dropDatabase());
- assert.commandWorked(db.adminCommand(
- {copydb: 1, fromdb: dbName, todb: copyDbName, bypassDocumentValidation: true}));
- assert.eq(coll.count(), db.getSiblingDB(copyDbName)[collName].count());
-
// Test the findAndModify command.
assert.throws(function() {
coll.findAndModify(
@@ -114,7 +100,7 @@
const reduce = function() {
return 'mapReduce';
};
- res = myDb.runCommand({
+ let res = myDb.runCommand({
mapReduce: collName,
map: map,
reduce: reduce,
diff --git a/jstests/core/collation.js b/jstests/core/collation.js
index d6857527bdf..5d199ae8915 100644
--- a/jstests/core/collation.js
+++ b/jstests/core/collation.js
@@ -1899,41 +1899,6 @@
assert.eq(8, coll.findOne({_id: "foo"}).x);
}
- // Test that the collections created with the "copydb" command inherit the default collation of
- // the corresponding collection.
- {
- const sourceDB = db.getSiblingDB("collation");
- const destDB = db.getSiblingDB("collation_cloned");
-
- sourceDB.dropDatabase();
- destDB.dropDatabase();
-
- // Create a collection with a non-simple default collation.
- assert.commandWorked(
- sourceDB.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}}));
- var sourceCollectionInfos = sourceDB.getCollectionInfos({name: coll.getName()});
-
- assert.writeOK(sourceDB[coll.getName()].insert({_id: "FOO"}));
- assert.writeOK(sourceDB[coll.getName()].insert({_id: "bar"}));
- assert.eq([{_id: "FOO"}],
- sourceDB[coll.getName()].find({_id: "foo"}).toArray(),
- "query should have performed a case-insensitive match");
-
- assert.commandWorked(
- sourceDB.adminCommand({copydb: 1, fromdb: sourceDB.getName(), todb: destDB.getName()}));
- var destCollectionInfos = destDB.getCollectionInfos({name: coll.getName()});
-
- // The namespace for the _id index will differ since the source and destination collections
- // are in different databases. Same for UUID.
- delete sourceCollectionInfos[0].idIndex.ns;
- delete sourceCollectionInfos[0].info.uuid;
- delete destCollectionInfos[0].idIndex.ns;
- delete destCollectionInfos[0].info.uuid;
-
- assert.eq(sourceCollectionInfos, destCollectionInfos);
- assert.eq([{_id: "FOO"}], destDB[coll.getName()].find({_id: "foo"}).toArray());
- }
-
// Test that the collection created with the "cloneCollectionAsCapped" command inherits the
// default collation of the corresponding collection. We skip running this command in a sharded
// cluster because it isn't supported by mongos.
diff --git a/jstests/core/commands_namespace_parsing.js b/jstests/core/commands_namespace_parsing.js
index 104b72f3456..db61de2b07b 100644
--- a/jstests/core/commands_namespace_parsing.js
+++ b/jstests/core/commands_namespace_parsing.js
@@ -233,13 +233,6 @@
assertFailsWithInvalidNamespacesForField(
"to", {renameCollection: "test.b", to: ""}, isFullyQualified, isAdminCommand);
- // Test copydb fails with an invalid fromdb name.
- assertFailsWithInvalidNamespacesForField(
- "fromdb", {copydb: 1, fromdb: "", todb: "b"}, isNotFullyQualified, isAdminCommand);
- // Test copydb fails with an invalid todb name.
- assertFailsWithInvalidNamespacesForField(
- "todb", {copydb: 1, fromdb: "a", todb: ""}, isNotFullyQualified, isAdminCommand);
-
// Test drop fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
"drop", {drop: ""}, isNotFullyQualified, isNotAdminCommand);
diff --git a/jstests/core/copydb.js b/jstests/core/copydb.js
deleted file mode 100644
index 991734f3f8d..00000000000
--- a/jstests/core/copydb.js
+++ /dev/null
@@ -1,28 +0,0 @@
-// @tags: [
-// requires_non_retryable_commands,
-// requires_fastcount,
-//
-// # copyDatabase is not available on embedded
-// incompatible_with_embedded
-// ]
-
-// Basic tests for the copydb command. These only test copying from the same server; these do not
-// test the ability of copydb to pull a database from another server (with or without auth).
-
-// Test basic command usage.
-var db1 = db.getSisterDB("copydb-test-db1");
-var db2 = db.getSisterDB("copydb-test-db2");
-assert.commandWorked(db1.dropDatabase());
-assert.commandWorked(db2.dropDatabase());
-assert.writeOK(db1.foo.save({db1: 1}));
-assert.commandWorked(db1.foo.ensureIndex({db1: 1}));
-assert.eq(1, db1.foo.count(), "A");
-assert.eq(0, db2.foo.count(), "B");
-assert.commandWorked(db1.copyDatabase(db1._name, db2._name));
-assert.eq(1, db1.foo.count(), "C");
-assert.eq(1, db2.foo.count(), "D");
-assert.eq(db1.foo.getIndexes().length, db2.foo.getIndexes().length);
-
-// Test command input validation.
-assert.commandFailed(db1.adminCommand(
- {copydb: 1, fromdb: db1.getName(), todb: "copydb.invalid"})); // Name can't contain dot.
diff --git a/jstests/core/system_profile.js b/jstests/core/system_profile.js
index 5198feebe80..2a805ba8942 100644
--- a/jstests/core/system_profile.js
+++ b/jstests/core/system_profile.js
@@ -68,14 +68,3 @@ assert.commandWorked(testDB.dropDatabase());
assert.commandWorked(testDB.createCollection("foo"));
assert.commandFailed(testDB.adminCommand(
{renameCollection: testDB.foo.getFullName(), to: testDB.system.profile.getFullName()}));
-
-// Copying a database containing "system.profile" should succeed. The "system.profile" collection
-// should not be copied.
-assert.commandWorked(testDB.dropDatabase());
-assert.commandWorked(testDB.createCollection("foo"));
-assert.commandWorked(testDB.createCollection("system.profile"));
-assert.commandWorked(testDBCopy.dropDatabase());
-assert.commandWorked(
- testDB.adminCommand({copydb: 1, fromdb: testDB.getName(), todb: testDBCopy.getName()}));
-assert(testDBCopy.foo.exists());
-assert.isnull(testDBCopy.system.profile.exists());
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index b6f21425b0c..4283f459268 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -131,7 +131,6 @@
skip: "Tested in views/views_sharded.js",
},
clearLog: {skip: isUnrelated},
- clone: {skip: "Tested in replsets/cloneDb.js"},
cloneCollection: {skip: "Tested in noPassthroughWithMongod/clonecollection.js"},
cloneCollectionAsCapped: {
command: {cloneCollectionAsCapped: "view", toCollection: "testcapped", size: 10240},
@@ -147,7 +146,6 @@
connectionStatus: {skip: isUnrelated},
convertToCapped: {command: {convertToCapped: "view", size: 12345}, expectFailure: true},
coordinateCommitTransaction: {skip: isUnrelated},
- copydb: {skip: "Tested in replsets/copydb.js"},
copydbsaslstart: {skip: isUnrelated},
count: {command: {count: "view"}},
cpuload: {skip: isAnInternalCommand},
diff --git a/jstests/libs/override_methods/auto_retry_on_network_error.js b/jstests/libs/override_methods/auto_retry_on_network_error.js
index c68d0de200c..a37f9d1650e 100644
--- a/jstests/libs/override_methods/auto_retry_on_network_error.js
+++ b/jstests/libs/override_methods/auto_retry_on_network_error.js
@@ -67,7 +67,6 @@
"cloneCollectionAsCapped",
"collMod",
"convertToCapped",
- "copydb",
"create",
"createIndexes",
"createRole",
diff --git a/jstests/libs/override_methods/set_read_and_write_concerns.js b/jstests/libs/override_methods/set_read_and_write_concerns.js
index b41faa315df..b68d6c1c5a4 100644
--- a/jstests/libs/override_methods/set_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_read_and_write_concerns.js
@@ -65,7 +65,6 @@
"collMod",
"commitTransaction",
"convertToCapped",
- "copydb",
"create",
"createIndexes",
"createRole",
diff --git a/jstests/noPassthrough/copydb_illegal_collections.js b/jstests/noPassthrough/copydb_illegal_collections.js
deleted file mode 100644
index 29c41d08ffc..00000000000
--- a/jstests/noPassthrough/copydb_illegal_collections.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * This test creates a replica set and tries copying the local database. It expects an error on
- * the `copydb` command when it runs across an illegal namespace to copy, e.g:
- * `local.system.replset` -> `db2.system.replset`.
- * @tags: [requires_replication, requires_persistence]
- */
-(function() {
- "use strict";
- var rst = new ReplSetTest({nodes: 1});
-
- rst.startSet();
- rst.initiate();
-
- var conn = rst.getPrimary(); // Waits for PRIMARY state.
- conn = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
- assert.neq(null, conn, "failed to restart");
-
- // Must drop the oplog in order to induce the correct error below.
- conn.getDB("local").oplog.rs.drop();
-
- var db1 = conn.getDB("local");
- var db2 = conn.getDB("db2");
-
- var res = db1.adminCommand({copydb: 1, fromdb: db1._name, todb: db2._name});
-
- assert.commandFailedWithCode(res, ErrorCodes.InvalidNamespace);
- assert.gt(res["errmsg"].indexOf("cannot write to 'db2.system.replset'"), -1);
- rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/index_version_autoupgrade.js b/jstests/noPassthrough/index_version_autoupgrade.js
index c9b4e523448..f6bbb55eee8 100644
--- a/jstests/noPassthrough/index_version_autoupgrade.js
+++ b/jstests/noPassthrough/index_version_autoupgrade.js
@@ -129,16 +129,6 @@
return coll;
}, false);
- // Test that the "copydb" command doesn't upgrade existing indexes to the latest version.
- testIndexVersionAutoUpgrades(function(coll) {
- assert.commandWorked(coll.getDB().adminCommand({
- copydb: 1,
- fromdb: coll.getDB().getName(),
- todb: "copied",
- }));
- return coll.getDB().getSiblingDB("copied")[coll.getName()];
- }, false);
-
// Test that the "cloneCollection" command doesn't upgrade existing indexes to the latest
// version.
var cloneConn = MongoRunner.runMongod({});
@@ -153,18 +143,5 @@
}, false);
MongoRunner.stopMongod(cloneConn);
- // Test that the "clone" command doesn't upgrade existing indexes to the latest version.
- cloneConn = MongoRunner.runMongod({});
- assert.neq(null, cloneConn, "mongod was unable to start up");
- testIndexVersionAutoUpgrades(function(coll) {
- var cloneDB = cloneConn.getDB(coll.getDB().getName());
- assert.commandWorked(cloneDB.runCommand({
- clone: conn.host,
- fromDB: coll.getDB().getName(),
- }));
- return cloneDB[coll.getName()];
- }, false);
- MongoRunner.stopMongod(cloneConn);
-
MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthroughWithMongod/server7428.js b/jstests/noPassthroughWithMongod/server7428.js
deleted file mode 100644
index 7b5278a10e1..00000000000
--- a/jstests/noPassthroughWithMongod/server7428.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Regression test for SERVER-7428.
-
-// TODO(spencer): move this test out of slowNightly directory once there is a better place for tests
-// that start their own mongod's but aren't slow
-
-// Verify that the copyDatabase command works appropriately when the
-// target mongo instance has authentication enabled.
-
-(function() {
-
- // Setup fromDb with no auth
- var fromDb = MongoRunner.runMongod();
-
- // Setup toDb with auth
- var toDb = MongoRunner.runMongod({auth: ""});
- var admin = toDb.getDB("admin");
- admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
- admin.auth("foo", "bar");
-
- admin.copyDatabase('test', 'test', fromDb.host);
-
- MongoRunner.stopMongod(fromDb);
- MongoRunner.stopMongod(toDb);
-})();
diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js
deleted file mode 100644
index f4ed71bb972..00000000000
--- a/jstests/replsets/cloneDb.js
+++ /dev/null
@@ -1,128 +0,0 @@
-// Test cloning a database from a replica set (as full replica set uri, just the PRIMARY, or just a
-// SECONDARY) to a standalone server and viceversa (SERVER-1643)
-
-(function() {
- "use strict";
-
- if (jsTest.options().keyFile) {
- jsTest.log("Skipping test because clone command doesn't work with authentication enabled:" +
- " SERVER-4245");
- } else {
- var numDocs = 2000;
-
- // 1kb string
- var str = new Array(1000).toString();
-
- var replsetDBName = 'cloneDBreplset';
- var standaloneDBName = 'cloneDBstandalone';
- var testColName = 'foo';
- var testViewName = 'view';
-
- jsTest.log("Create replica set");
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
- replTest.startSet();
- replTest.initiate();
- var master = replTest.getPrimary();
- var secondary = replTest._slaves[0];
- var masterDB = master.getDB(replsetDBName);
- masterDB.dropDatabase();
-
- jsTest.log("Create standalone server");
- var standalone = MongoRunner.runMongod();
- standalone.getDB("admin").runCommand({setParameter: 1, logLevel: 5});
- var standaloneDB = standalone.getDB(replsetDBName);
- standaloneDB.dropDatabase();
-
- jsTest.log("Insert data into replica set");
- var bulk = masterDB[testColName].initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: str});
- }
- assert.writeOK(bulk.execute({w: 3}));
-
- jsTest.log("Create view on replica set");
- assert.commandWorked(masterDB.runCommand({create: testViewName, viewOn: testColName}));
-
- // Make sure all writes have replicated to secondary.
- replTest.awaitReplication();
-
- jsTest.log("Clone db from replica set to standalone server");
- standaloneDB.cloneDatabase(replTest.getURL());
- assert.eq(numDocs,
- standaloneDB[testColName].find().itcount(),
- 'cloneDatabase from replset to standalone failed (document counts do not match)');
- assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- 'cloneDatabase from replset to standalone failed (count on view incorrect)');
-
- jsTest.log("Clone db from replica set PRIMARY to standalone server");
- standaloneDB.dropDatabase();
- standaloneDB.cloneDatabase(master.host);
- assert.eq(numDocs,
- standaloneDB[testColName].find().itcount(),
- 'cloneDatabase from PRIMARY to standalone failed (document counts do not match)');
- assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- 'cloneDatabase from PRIMARY to standalone failed (count on view incorrect)');
-
- jsTest.log("Clone db from replica set SECONDARY to standalone server (should not copy)");
- standaloneDB.dropDatabase();
- standaloneDB.cloneDatabase(secondary.host);
- assert.eq(
- 0,
- standaloneDB[testColName].find().itcount(),
- 'cloneDatabase from SECONDARY to standalone copied documents without slaveOk: true');
-
- jsTest.log("Clone db from replica set SECONDARY to standalone server using slaveOk");
- standaloneDB.dropDatabase();
- standaloneDB.runCommand({clone: secondary.host, slaveOk: true});
- assert.eq(
- numDocs,
- standaloneDB[testColName].find().itcount(),
- 'cloneDatabase from SECONDARY to standalone failed (document counts do not match)');
- assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- 'cloneDatabase from SECONDARY to standalone failed (count on view incorrect)');
-
- jsTest.log("Switch db and insert data into standalone server");
- masterDB = master.getDB(standaloneDBName);
- var secondaryDB = secondary.getDB(standaloneDBName);
- standaloneDB = standalone.getDB(standaloneDBName);
- masterDB.dropDatabase();
- secondaryDB.dropDatabase();
- standaloneDB.dropDatabase();
-
- bulk = standaloneDB[testColName].initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: str});
- }
- assert.writeOK(bulk.execute());
-
- assert.commandWorked(standaloneDB.runCommand({create: testViewName, viewOn: testColName}));
-
- jsTest.log("Clone db from standalone server to replica set PRIMARY");
- masterDB.cloneDatabase(standalone.host);
- replTest.awaitReplication();
- assert.eq(numDocs,
- masterDB[testColName].find().itcount(),
- 'cloneDatabase from standalone to PRIMARY failed (document counts do not match)');
- assert.eq(numDocs,
- masterDB[testViewName].find().itcount(),
- 'cloneDatabase from standalone to PRIMARY failed (count on view incorrect)');
-
- jsTest.log("Clone db from standalone server to replica set SECONDARY");
- masterDB.dropDatabase();
- replTest.awaitReplication();
- secondaryDB.cloneDatabase(standalone.host);
- assert.eq(
- 0,
- secondaryDB[testColName].find().itcount(),
- 'cloneDatabase from standalone to SECONDARY succeeded and should not accept writes');
-
- jsTest.log("Shut down replica set and standalone server");
- MongoRunner.stopMongod(standalone);
-
- replTest.stopSet();
- }
-
-})();
diff --git a/jstests/replsets/copydb.js b/jstests/replsets/copydb.js
deleted file mode 100644
index 2c11e553618..00000000000
--- a/jstests/replsets/copydb.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Tests the copydb command in a replica set.
-// Ensures that documents and indexes are replicated to secondary.
-
-(function() {
- 'use strict';
-
- var replTest = new ReplSetTest({name: 'copydbTest', nodes: 3});
-
- replTest.startSet();
- replTest.initiate();
- var primary = replTest.getPrimary();
- var secondary = replTest._slaves[0];
-
- var sourceDBName = 'copydb-repl-test-source';
- var targetDBName = 'copydb-repl-test-target';
-
- var primarySourceDB = primary.getDB(sourceDBName);
- assert.commandWorked(primarySourceDB.dropDatabase(),
- 'failed to drop source database ' + sourceDBName + ' on primary');
-
- var primaryTargetDB = primary.getDB(targetDBName);
- assert.commandWorked(primaryTargetDB.dropDatabase(),
- 'failed to drop target database ' + targetDBName + ' on primary');
-
- assert.writeOK(primarySourceDB.foo.save({a: 1}),
- 'failed to insert document in source collection');
- assert.commandWorked(primarySourceDB.foo.ensureIndex({a: 1}),
- 'failed to create index in source collection on primary');
- assert.commandWorked(primarySourceDB.runCommand({create: "fooView", viewOn: "foo"}),
- 'failed to create view on source collection on primary');
-
- assert.eq(1,
- primarySourceDB.foo.find().itcount(),
- 'incorrect number of documents in source collection on primary before copy');
- assert.eq(1,
- primarySourceDB.fooView.find().itcount(),
- 'incorrect number of documents in source view on primary before copy');
- assert.eq(0,
- primaryTargetDB.foo.find().itcount(),
- 'target collection on primary should be empty before copy');
-
- assert.commandWorked(
- primarySourceDB.copyDatabase(primarySourceDB.getName(), primaryTargetDB.getName()),
- 'failed to copy database');
-
- assert.eq(primarySourceDB.foo.find().itcount(),
- primaryTargetDB.foo.find().itcount(),
- 'incorrect number of documents in target collection on primary after copy');
-
- // Confirm that 'fooView' is still a view namespace after copy.
- let res = primaryTargetDB.runCommand({listCollections: 1, filter: {name: "fooView"}});
- assert.commandWorked(res);
- assert(res.cursor.firstBatch.length === 1);
- assert(res.cursor.firstBatch[0].hasOwnProperty("type"), tojson(res));
- assert.eq("view",
- res.cursor.firstBatch[0].type,
- "Namespace exected to be view: " + tojson(res.cursor.firstBatch[0]));
-
- assert.eq(primarySourceDB.fooView.find().itcount(),
- primaryTargetDB.fooView.find().itcount(),
- 'incorrect number of documents in target view on primary after copy');
-
- assert.eq(primarySourceDB.foo.getIndexes().length,
- primaryTargetDB.foo.getIndexes().length,
- 'incorrect number of indexes in target collection on primary after copy');
-
- replTest.awaitReplication();
-
- var secondarySourceDB = secondary.getDB(sourceDBName);
-
- assert.eq(primarySourceDB.foo.find().itcount(),
- secondarySourceDB.foo.find().itcount(),
- 'incorrect number of documents in source collection on secondary after copy');
-
- assert.eq(primarySourceDB.fooView.find().itcount(),
- secondarySourceDB.fooView.find().itcount(),
- 'incorrect number of documents in source view on secondary after copy');
-
- assert.eq(primarySourceDB.foo.getIndexes().length,
- secondarySourceDB.foo.getIndexes().length,
- 'incorrect number of indexes in source collection on secondary after copy');
-
- var secondaryTargetDB = secondary.getDB(targetDBName);
-
- assert.eq(primaryTargetDB.foo.find().itcount(),
- secondaryTargetDB.foo.find().itcount(),
- 'incorrect number of documents in target collection on secondary after copy');
-
- assert.eq(primaryTargetDB.fooView.find().itcount(),
- secondaryTargetDB.fooView.find().itcount(),
- 'incorrect number of documents in target view on secondary after copy');
-
- assert.eq(primaryTargetDB.foo.getIndexes().length,
- secondaryTargetDB.foo.getIndexes().length,
- 'incorrect number of indexes in target collection on secondary after copy');
- replTest.stopSet();
-}());
diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js
index 22a512f19d0..5a0a1b95562 100644
--- a/jstests/replsets/localhostAuthBypass.js
+++ b/jstests/replsets/localhostAuthBypass.js
@@ -43,11 +43,8 @@ var assertCannotRunCommands = function(mongo, isPrimary) {
{out: "other"});
});
- // DB operations
- var authorizeErrorCode = 13;
- assert.commandFailedWithCode(
- mongo.getDB("test").copyDatabase("admin", "admin2"), authorizeErrorCode, "copyDatabase");
// Create collection
+ var authorizeErrorCode = 13;
assert.commandFailedWithCode(
mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
authorizeErrorCode,
diff --git a/jstests/sharding/auth_copydb.js b/jstests/sharding/auth_copydb.js
deleted file mode 100644
index 1c732546b2b..00000000000
--- a/jstests/sharding/auth_copydb.js
+++ /dev/null
@@ -1,49 +0,0 @@
-// Tests the copydb command on mongos with auth
-(function() {
- 'use strict';
- load('jstests/libs/feature_compatibility_version.js');
-
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- var st = new ShardingTest(
- {shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
- var mongos = st.s0;
- var destAdminDB = mongos.getDB('admin');
- var destTestDB = mongos.getDB('test');
-
- var sourceMongodConn = MongoRunner.runMongod({});
- var sourceTestDB = sourceMongodConn.getDB('test');
-
- // Ensure sourceMongodConn has featureCompatibilityVersion=lastStableFCV so that the sharded
- // cluster can communicate with it if it has featureCompatibilityVersion=lastStableFCV
- assert.commandWorked(
- sourceMongodConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- sourceTestDB.foo.insert({a: 1});
-
- destAdminDB.createUser({
- user: 'admin',
- pwd: 'password',
- roles: jsTest.adminUserRoles
- }); // Turns on access control enforcement
-
- jsTestLog("Running copydb that should fail");
- var res = destAdminDB.runCommand(
- {copydb: 1, fromhost: sourceMongodConn.host, fromdb: 'test', todb: 'test'});
- printjson(res);
- assert.commandFailed(res);
-
- destAdminDB.auth('admin', 'password');
- assert.eq(0, destTestDB.foo.count()); // Be extra sure the copydb didn't secretly succeed.
-
- jsTestLog("Running copydb that should succeed");
- res = destAdminDB.runCommand(
- {copydb: 1, fromhost: sourceMongodConn.host, fromdb: 'test', todb: 'test'});
- printjson(res);
- assert.commandWorked(res);
-
- assert.eq(1, destTestDB.foo.count());
- assert.eq(1, destTestDB.foo.findOne().a);
-
- st.stop();
- MongoRunner.stopMongod(sourceMongodConn);
-})();
diff --git a/jstests/sharding/copydb_from_mongos.js b/jstests/sharding/copydb_from_mongos.js
deleted file mode 100644
index 66db42407ca..00000000000
--- a/jstests/sharding/copydb_from_mongos.js
+++ /dev/null
@@ -1,22 +0,0 @@
-(function() {
-
- var st = new ShardingTest({shards: 1});
-
- var testDB = st.s.getDB('test');
- assert.writeOK(testDB.foo.insert({a: 1}));
-
- var res =
- testDB.adminCommand({copydb: 1, fromhost: st.s.host, fromdb: 'test', todb: 'test_copy'});
- assert.commandWorked(res);
-
- var copy = st.s.getDB('test_copy');
- assert.eq(1, copy.foo.count());
- assert.eq(1, copy.foo.findOne().a);
-
- // Test invalid todb database name.
- assert.commandFailed(testDB.adminCommand(
- {copydb: 1, fromhost: st.s.host, fromdb: 'test_copy', todb: 'test/copy'}));
-
- st.stop();
-
-})();
diff --git a/jstests/sharding/database_and_shard_versioning_all_commands.js b/jstests/sharding/database_and_shard_versioning_all_commands.js
index ec7a1c564fe..09b60667d4c 100644
--- a/jstests/sharding/database_and_shard_versioning_all_commands.js
+++ b/jstests/sharding/database_and_shard_versioning_all_commands.js
@@ -6,20 +6,13 @@
'use strict';
load('jstests/libs/profiler.js');
+ load('jstests/sharding/libs/last_stable_mongos_commands.js');
const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
const SHARD_VERSION_UNSHARDED = [Timestamp(0, 0), ObjectId("000000000000000000000000")];
- // These commands exist in the 4.0 mongo shell, so we must define a test case in mixed version
- // suites. However, a check exists in this test that asserts that every command tested exists
- // on mongos. In an all-4.2 environment, these commands won't exist. To increase test coverage,
- // and allow us to run on same- and mixed-version suites, we will allow these commands to have
- // a test defined without always existing on the mongos being used.
- const fcv40OnlyCommands =
- ['abortTransaction', 'commitTransaction', 'eval', 'geoNear', 'group', 'reIndex'];
-
function validateTestCase(testCase) {
assert(testCase.skip || testCase.command,
"must specify exactly one of 'skip' or 'command' for test case " + tojson(testCase));
@@ -115,7 +108,6 @@
assert(mongosConn.getDB(dbName).getCollection(collName).drop());
}
},
- copydb: {skip: "not allowed through mongos"},
count: {
sendsDbVersion: true,
sendsShardVersion: true,
@@ -207,7 +199,6 @@
echo: {skip: "does not forward command to primary shard"},
enableSharding: {skip: "does not forward command to primary shard"},
endSessions: {skip: "goes through the cluster write path"},
- eval: {skip: "must define test coverage for 4.0 backwards compatibility"},
explain: {skip: "TODO SERVER-31226"},
features: {skip: "executes locally on mongos (not sent to any remote node)"},
filemd5: {
@@ -227,7 +218,6 @@
},
flushRouterConfig: {skip: "executes locally on mongos (not sent to any remote node)"},
fsync: {skip: "broadcast to all shards"},
- geoNear: {skip: "must define test coverage for 4.0 backwards compatibility"},
getCmdLineOpts: {skip: "executes locally on mongos (not sent to any remote node)"},
getDiagnosticData: {skip: "executes locally on mongos (not sent to any remote node)"},
getLastError: {skip: "does not forward command to primary shard"},
@@ -241,7 +231,6 @@
grantPrivilegesToRole: {skip: "always targets the config server"},
grantRolesToRole: {skip: "always targets the config server"},
grantRolesToUser: {skip: "always targets the config server"},
- group: {skip: "must define test coverage for 4.0 backwards compatibility"},
hostInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
insert: {
sendsDbVersion: false,
@@ -361,7 +350,6 @@
refreshLogicalSessionCacheNow: {skip: "goes through the cluster write path"},
refreshSessions: {skip: "executes locally on mongos (not sent to any remote node)"},
refreshSessionsInternal: {skip: "executes locally on mongos (not sent to any remote node)"},
- reIndex: {skip: "must define test coverage for 4.0 backwards compatibility"},
removeShard: {skip: "not on a user database"},
removeShardFromZone: {skip: "not on a user database"},
renameCollection: {
@@ -432,6 +420,10 @@
whatsmyuri: {skip: "executes locally on mongos (not sent to any remote node)"},
};
+ commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+ });
+
class AllCommandsTestRunner {
constructor() {
this.st = new ShardingTest(this.getShardingTestOptions());
@@ -548,7 +540,15 @@
// After iterating through all the existing commands, ensure there were no additional
// test cases that did not correspond to any mongos command.
for (let key of Object.keys(testCases)) {
- if (fcv40OnlyCommands.includes(key)) {
+ // We have defined real test cases for commands added in 4.2 so that the test cases
+ // are exercised in the regular suites, but because these test cases can't run in
+ // the last stable suite, we skip processing them here to avoid failing the below
+ // assertion. We have defined "skip" test cases for commands removed in 4.2 so the
+ // test case is defined in last stable suites (in which these commands still exist
+ // on the mongos), but these test cases won't be run in regular suites, so we skip
+ // processing them below as well.
+ if (commandsAddedToMongosIn42.includes(key) ||
+ commandsRemovedFromMongosIn42.includes(key)) {
continue;
}
assert(testCases[key].validated || testCases[key].conditional,
diff --git a/jstests/sharding/libs/last_stable_mongos_commands.js b/jstests/sharding/libs/last_stable_mongos_commands.js
new file mode 100644
index 00000000000..43a1da0acfa
--- /dev/null
+++ b/jstests/sharding/libs/last_stable_mongos_commands.js
@@ -0,0 +1,16 @@
+// These commands were removed from mongos 4.2, but will still appear in the listCommands output
+// of a 4.0 mongos. A last-stable mongos will be unable to run a command on a latest version shard
+// that no longer supports that command. To increase test coverage and allow us to run on same- and
+// mixed-version suites, we allow these commands to have a test defined without always existing on
+// the servers being used.
+const commandsRemovedFromMongosIn42 = [
+ 'copydb',
+ 'eval',
+ 'geoNear',
+ 'group',
+ 'reIndex',
+];
+// These commands were added in mongos 4.2, so will not appear in the listCommands output of a 4.0
+// mongos. We will allow these commands to have a test defined without always existing on the mongos
+// being used.
+const commandsAddedToMongosIn42 = ['abortTransaction', 'commitTransaction'];
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index d2035acdba2..996fb949175 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -95,9 +95,6 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
to: st.shard0.shardName // Arbitrary shard.
});
assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
- assert.commandFailedWithCode(mongo.getDB("test").copyDatabase("admin", "admin2"),
- authorizeErrorCode,
- "copyDatabase");
// Create collection
assert.commandFailedWithCode(
mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 15fc549050e..25009589e67 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -88,7 +88,6 @@
// Take a copy of the config db, in order to test the harder-to-setup cases below.
// Copy into a standalone to also test running printShardingStatus() against a config dump.
- // TODO: Replace this manual copy with copydb once SERVER-13080 is fixed.
var config = mongos.getDB("config");
var configCopy = standalone.getDB("configCopy");
config.getCollectionInfos().forEach(function(c) {
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 65eb2e0c03b..e26497dfddd 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -20,6 +20,7 @@
"use strict";
load('jstests/libs/profiler.js');
+ load('jstests/sharding/libs/last_stable_mongos_commands.js');
let db = "test";
let coll = "foo";
@@ -105,7 +106,6 @@
connPoolSync: {skip: "does not return user data"},
connectionStatus: {skip: "does not return user data"},
convertToCapped: {skip: "primary only"},
- copydb: {skip: "primary only"},
copydbsaslstart: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
@@ -152,7 +152,6 @@
emptycapped: {skip: "primary only"},
enableSharding: {skip: "primary only"},
endSessions: {skip: "does not return user data"},
- eval: {skip: "must define test coverage for 4.0 backwards compatibility"},
explain: {skip: "TODO SERVER-30068"},
features: {skip: "does not return user data"},
filemd5: {skip: "does not return user data"},
@@ -172,7 +171,6 @@
forceerror: {skip: "does not return user data"},
fsync: {skip: "does not return user data"},
fsyncUnlock: {skip: "does not return user data"},
- geoNear: {skip: "must define test coverage for 4.0 backwards compatibility"},
geoSearch: {skip: "not supported in mongos"},
getCmdLineOpts: {skip: "does not return user data"},
getDiagnosticData: {skip: "does not return user data"},
@@ -188,7 +186,6 @@
grantPrivilegesToRole: {skip: "primary only"},
grantRolesToRole: {skip: "primary only"},
grantRolesToUser: {skip: "primary only"},
- group: {skip: "must define test coverage for 4.0 backwards compatibility"},
handshake: {skip: "does not return user data"},
hostInfo: {skip: "does not return user data"},
insert: {skip: "primary only"},
@@ -244,7 +241,6 @@
planCacheListQueryShapes: {skip: "does not return user data"},
planCacheSetFilter: {skip: "does not return user data"},
profile: {skip: "primary only"},
- reIndex: {skip: "does not return user data"},
reapLogicalSessionCacheNow: {skip: "does not return user data"},
refreshLogicalSessionCacheNow: {skip: "does not return user data"},
refreshSessions: {skip: "does not return user data"},
@@ -307,6 +303,10 @@
whatsmyuri: {skip: "does not return user data"}
};
+ commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+ });
+
let scenarios = {
dropRecreateAsUnshardedOnSameShard: function(
staleMongos, freshMongos, test, commandProfile) {
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index 67270de532a..6b682d1609d 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -25,6 +25,7 @@
"use strict";
load('jstests/libs/profiler.js');
+ load('jstests/sharding/libs/last_stable_mongos_commands.js');
let db = "test";
let coll = "foo";
@@ -118,7 +119,6 @@
connPoolSync: {skip: "does not return user data"},
connectionStatus: {skip: "does not return user data"},
convertToCapped: {skip: "primary only"},
- copydb: {skip: "primary only"},
copydbsaslstart: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
@@ -176,7 +176,6 @@
emptycapped: {skip: "primary only"},
enableSharding: {skip: "primary only"},
endSessions: {skip: "does not return user data"},
- eval: {skip: "must define test coverage for 4.0 backwards compatibility"},
explain: {skip: "TODO SERVER-30068"},
features: {skip: "does not return user data"},
filemd5: {skip: "does not return user data"},
@@ -202,7 +201,6 @@
forceerror: {skip: "does not return user data"},
fsync: {skip: "does not return user data"},
fsyncUnlock: {skip: "does not return user data"},
- geoNear: {skip: "must define test coverage for 4.0 backwards compatibility"},
geoSearch: {skip: "not supported in mongos"},
getCmdLineOpts: {skip: "does not return user data"},
getDiagnosticData: {skip: "does not return user data"},
@@ -218,7 +216,6 @@
grantPrivilegesToRole: {skip: "primary only"},
grantRolesToRole: {skip: "primary only"},
grantRolesToUser: {skip: "primary only"},
- group: {skip: "must define test coverage for 4.0 backwards compatibility"},
handshake: {skip: "does not return user data"},
hostInfo: {skip: "does not return user data"},
insert: {skip: "primary only"},
@@ -279,7 +276,6 @@
planCacheListQueryShapes: {skip: "does not return user data"},
planCacheSetFilter: {skip: "does not return user data"},
profile: {skip: "primary only"},
- reIndex: {skip: "does not return user data"},
reapLogicalSessionCacheNow: {skip: "does not return user data"},
refreshLogicalSessionCacheNow: {skip: "does not return user data"},
refreshSessions: {skip: "does not return user data"},
@@ -342,6 +338,10 @@
whatsmyuri: {skip: "does not return user data"}
};
+ commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+ });
+
// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 1f372a38fed..03822856590 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -20,6 +20,7 @@
"use strict";
load('jstests/libs/profiler.js');
+ load('jstests/sharding/libs/last_stable_mongos_commands.js');
let db = "test";
let coll = "foo";
@@ -106,7 +107,6 @@
connPoolSync: {skip: "does not return user data"},
connectionStatus: {skip: "does not return user data"},
convertToCapped: {skip: "primary only"},
- copydb: {skip: "primary only"},
copydbsaslstart: {skip: "primary only"},
count: {
setUp: function(mongosConn) {
@@ -154,7 +154,6 @@
emptycapped: {skip: "primary only"},
enableSharding: {skip: "primary only"},
endSessions: {skip: "does not return user data"},
- eval: {skip: "must define test coverage for 4.0 backwards compatibility"},
explain: {skip: "TODO SERVER-30068"},
features: {skip: "does not return user data"},
filemd5: {skip: "does not return user data"},
@@ -175,7 +174,6 @@
forceerror: {skip: "does not return user data"},
fsync: {skip: "does not return user data"},
fsyncUnlock: {skip: "does not return user data"},
- geoNear: {skip: "must define test coverage for 4.0 backwards compatibility"},
geoSearch: {skip: "not supported in mongos"},
getCmdLineOpts: {skip: "does not return user data"},
getDiagnosticData: {skip: "does not return user data"},
@@ -191,7 +189,6 @@
grantPrivilegesToRole: {skip: "primary only"},
grantRolesToRole: {skip: "primary only"},
grantRolesToUser: {skip: "primary only"},
- group: {skip: "must define test coverage for 4.0 backwards compatibility"},
handshake: {skip: "does not return user data"},
hostInfo: {skip: "does not return user data"},
insert: {skip: "primary only"},
@@ -253,7 +250,6 @@
refreshLogicalSessionCacheNow: {skip: "does not return user data"},
refreshSessions: {skip: "does not return user data"},
refreshSessionsInternal: {skip: "does not return user data"},
- reIndex: {skip: "does not return user data"},
removeShard: {skip: "primary only"},
removeShardFromZone: {skip: "primary only"},
renameCollection: {skip: "primary only"},
@@ -312,6 +308,10 @@
whatsmyuri: {skip: "does not return user data"}
};
+ commandsRemovedFromMongosIn42.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+ });
+
// Set the secondaries to priority 0 and votes 0 to prevent the primaries from stepping down.
let rsOpts = {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]};
let st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});