author     Esha Maharishi <esha.maharishi@mongodb.com>  2017-01-20 18:58:49 -0500
committer  Esha Maharishi <esha.maharishi@mongodb.com>  2017-01-23 15:19:28 -0500
commit     ac5d193edf5c1e170119871dd4bfdc5a839fc1cf (patch)
tree       a428118e57471324f362dd602306e375424e0fcc /jstests
parent     7480e053bb992f869bf83c8e54ee088afa199bb9 (diff)
download   mongo-ac5d193edf5c1e170119871dd4bfdc5a839fc1cf.tar.gz
SERVER-27625 remove dead ANSA and setShardVersion code
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/core/views/views_all_commands.js               2
-rw-r--r--  jstests/sharding/addshard2.js                          35
-rw-r--r--  jstests/sharding/delayed_shard_identity_upsert.js     131
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js                6
-rw-r--r--  jstests/sharding/shard_aware_on_config_election.js    128
-rw-r--r--  jstests/sharding/shard_aware_on_set_shard_version.js   62
-rw-r--r--  jstests/sharding/ssv_config_check.js                   16
7 files changed, 40 insertions, 340 deletions
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 361cb97c127..06655890f37 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -451,7 +451,7 @@
},
skipSharded: true,
expectFailure: true,
- expectedErrorCode: null,
+ expectedErrorCode: 193,
isAdminCommand: true,
},
splitVector: {
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index a7912c8762d..30b7d4365ff 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -51,18 +51,9 @@
}, "removeShard never completed for shard " + shardName);
};
- // Enable the failpoint that prevents the config server from upserting a shardIdentity on new
- // shards so that the same shard host can be re-used for multiple addShard calls without being
- // restarted in between each addShard (the shardIdentity cannot be deleted while the shard host
- // is running with --shardsvr).
var st = new ShardingTest({
shards: 0,
mongos: 1,
- other: {
- configOptions: {
- setParameter: "failpoint.dontUpsertShardIdentityOnNewShards={'mode':'alwaysOn'}"
- }
- }
});
// Add one shard since the last shard cannot be removed.
@@ -72,6 +63,13 @@
// Allocate a port that can be used to test adding invalid hosts.
var portWithoutHostRunning = allocatePort();
+ // Enable the failpoint that prevents the config server from upserting a shardIdentity on new
+ // shards so that the same shard host can be re-used for multiple addShard calls without being
+ // restarted in between each addShard (the shardIdentity cannot be deleted while the shard host
+ // is running with --shardsvr).
+ st.configRS.getPrimary().adminCommand(
+ {configureFailPoint: "dontUpsertShardIdentityOnNewShards", mode: "alwaysOn"});
+
// 1. Test adding a *standalone*
// 1.a. with or without specifying the shardName.
@@ -169,15 +167,34 @@
// 4. Test that a replica set whose *set name* is "admin" can be written to (SERVER-17232).
+ // Turn off the dontUpsertShardIdentityOnNewShards failpoint, since mongos will send
+ // setShardVersion when trying to do the write, and the setShardVersion will fail if
+ // sharding state is not enabled on the shard.
+ assert.commandWorked(st.configRS.getPrimary().adminCommand(
+ {configureFailPoint: "dontUpsertShardIdentityOnNewShards", mode: "off"}));
+
rst = new ReplSetTest({name: "admin", nodes: 1});
rst.startSet({shardsvr: ''});
rst.initiate();
jsTest.log("A replica set whose set name is 'admin' should be able to be written to.");
+
addShardRes = st.s.adminCommand({addShard: rst.getURL()});
assertAddShardSucceeded(addShardRes);
+
+ // Ensure the write goes to the newly added shard.
+ assert.commandWorked(st.s.getDB('test').runCommand({create: "foo"}));
+ var res = st.s.getDB('config').getCollection('databases').findOne({_id: 'test'});
+ assert.neq(null, res);
+ if (res.primary != addShardRes.shardAdded) {
+ assert.commandWorked(st.s.adminCommand({movePrimary: 'test', to: addShardRes.shardAdded}));
+ }
+
assert.writeOK(st.s.getDB('test').foo.insert({x: 1}));
+ assert.neq(null, rst.getPrimary().getDB('test').foo.findOne());
+
assert.commandWorked(st.s.getDB('test').runCommand({dropDatabase: 1}));
+
removeShardWithName(addShardRes.shardAdded);
rst.stopSet();
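
Note on the addshard2.js hunks above: the failpoint is now toggled at runtime on the config server primary instead of being set as a startup parameter. A minimal sketch of that round trip (illustration only, not part of the commit), assuming a ShardingTest named st is already running:

// Sketch only, assuming a running ShardingTest named st.
var configPrimary = st.configRS.getPrimary();

// Keep the config server from upserting a shardIdentity onto newly added shards.
assert.commandWorked(configPrimary.adminCommand(
    {configureFailPoint: "dontUpsertShardIdentityOnNewShards", mode: "alwaysOn"}));

// ... addShard calls that should leave the new shard without a shardIdentity ...

// Restore normal behavior before doing writes that require sharding-aware shards.
assert.commandWorked(configPrimary.adminCommand(
    {configureFailPoint: "dontUpsertShardIdentityOnNewShards", mode: "off"}));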
diff --git a/jstests/sharding/delayed_shard_identity_upsert.js b/jstests/sharding/delayed_shard_identity_upsert.js
deleted file mode 100644
index d2d3a59054f..00000000000
--- a/jstests/sharding/delayed_shard_identity_upsert.js
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Tests that a variety of operations from a mongos to a shard succeed even during the period when
- * the shard has yet to receive the shardIdentity from the config server.
- */
-(function() {
- 'use strict';
-
- // Simulate that the insert of the shardIdentity doc from the config to a new shard gets
- // "delayed" by using the dontUpsertShardIdentityOnNewShards failpoint on the configs.
- var st = new ShardingTest({
- shards: 3,
- mongos: 1,
- other: {
- rs: true,
- rsOptions: {nodes: 1},
- configOptions: {
- setParameter: "failpoint.dontUpsertShardIdentityOnNewShards={'mode':'alwaysOn'}"
- }
- }
- });
-
- var testDB = st.s.getDB("test");
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
-
- // Create a collection sharded on {a: 1}. Add 2dsphere index to test geoNear.
- var coll = testDB.getCollection("sharded");
- coll.drop();
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({geo: "2dsphere"}));
- assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
-
- // Split the collection.
- // shard0000: { "a" : { "$minKey" : 1 } } -->> { "a" : 1 }
- // shard0001: { "a" : 1 } -->> { "a" : 10 }
- // shard0002: { "a" : 10 } -->> { "a" : { "$maxKey" : 1 }}
- var chunk2Min = 1;
- var chunk3Min = 10;
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: chunk2Min}}));
- assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: chunk3Min}}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 5}, to: st.shard1.shardName}));
- assert.commandWorked(testDB.adminCommand(
- {moveChunk: coll.getFullName(), find: {a: 15}, to: st.shard2.shardName}));
-
- // Put data on each shard.
- // Note that the balancer is off by default, so the chunks will stay put.
- // shard0000: {a: 0}
- // shard0001: {a: 2}, {a: 4}
- // shard0002: {a: 15}
- // Include geo field to test geoNear.
- var a_0 = {_id: 0, a: 0, geo: {type: "Point", coordinates: [0, 0]}};
- var a_2 = {_id: 1, a: 2, geo: {type: "Point", coordinates: [0, 0]}};
- var a_4 = {_id: 2, a: 4, geo: {type: "Point", coordinates: [0, 0]}};
- var a_15 = {_id: 3, a: 15, geo: {type: "Point", coordinates: [0, 0]}};
- assert.writeOK(coll.insert(a_0));
- assert.writeOK(coll.insert(a_2));
- assert.writeOK(coll.insert(a_4));
- assert.writeOK(coll.insert(a_15));
-
- // Aggregate and aggregate explain.
- assert.eq(3, coll.aggregate([{$match: {a: {$lt: chunk3Min}}}]).itcount());
- assert.commandWorked(coll.explain().aggregate([{$match: {a: {$lt: chunk3Min}}}]));
-
- // Count and count explain.
- assert.eq(3, coll.find({a: {$lt: chunk3Min}}).count());
- assert.commandWorked(coll.explain().find({a: {$lt: chunk3Min}}).count());
-
- // Distinct and distinct explain.
- assert.eq(3, coll.distinct("_id", {a: {$lt: chunk3Min}}).length);
- assert.commandWorked(coll.explain().distinct("_id", {a: {$lt: chunk3Min}}));
-
- // Find and find explain.
- assert.eq(3, coll.find({a: {$lt: chunk3Min}}).itcount());
- assert.commandWorked(coll.find({a: {$lt: chunk3Min}}).explain());
-
- // FindAndModify and findAndModify explain.
- assert.eq(0, coll.findAndModify({query: {a: 0}, update: {$set: {b: 1}}}).a);
- assert.commandWorked(coll.explain().findAndModify({query: {a: 0}, update: {$set: {b: 1}}}));
-
- // GeoNear.
- assert.eq(3,
- assert
- .commandWorked(testDB.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {a: {$lt: chunk3Min}},
- }))
- .results.length);
-
- // MapReduce.
- assert.eq(3,
- assert
- .commandWorked(coll.mapReduce(
- function() {
- emit(this.a, 1);
- },
- function(key, values) {
- return Array.sum(values);
- },
- {out: {inline: 1}, query: {a: {$lt: chunk3Min}}}))
- .results.length);
-
- // Remove and remove explain.
- var writeRes = coll.remove({a: {$lt: chunk3Min}});
- assert.writeOK(writeRes);
- assert.eq(3, writeRes.nRemoved);
- assert.commandWorked(coll.explain().remove({a: {$lt: chunk3Min}}));
- assert.writeOK(coll.insert(a_0));
- assert.writeOK(coll.insert(a_2));
- assert.writeOK(coll.insert(a_4));
-
- // Update and update explain.
- writeRes = coll.update({a: {$lt: chunk3Min}}, {$set: {b: 1}}, {multi: true});
- assert.writeOK(writeRes);
- assert.eq(3, writeRes.nMatched);
- assert.commandWorked(
- coll.explain().update({a: {$lt: chunk3Min}}, {$set: {b: 1}}, {multi: true}));
-
- // Assert that the shardIdentity document has still not "reached" any shard, meaning all of the
- // above commands indeed succeeded during the period that the shardIdentity insert was
- // "delayed."
- for (shard in st.shards) {
- var res = shard.getDB("admin").getCollection("system.version").find({_id: "shardIdentity"});
- assert.eq(null, res);
- }
-
- st.stop();
-
-})();
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index e619af509e1..efb84783bd4 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -25,12 +25,12 @@
// The cluster now has the shard information. Then kill the replica set so when mongos restarts
// and tries to create a ReplSetMonitor for that shard, it will not be able to connect to any of
// the seed servers.
- replTest.stopSet();
+ // Don't clear the data directory so that the shardIdentity is not deleted.
+ replTest.stopSet(undefined /* send default signal */, true /* don't clear data directory */);
st.restartMongos(0);
- replTest.startSet({restart: true});
- replTest.initiate();
+ replTest.startSet({restart: true, noCleanData: true});
replTest.awaitSecondaryNodes();
// Verify that the replSetMonitor can reach the restarted set
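
The restart sequence above relies on the shard keeping its data files, and therefore its shardIdentity document, across the restart. A condensed sketch of the pattern (illustration only, not part of the commit), assuming an initiated ReplSetTest named replTest:

// Sketch only, assuming an initiated ReplSetTest named replTest.
// Stop without clearing the data directories so the shardIdentity document survives.
replTest.stopSet(undefined /* default signal */, true /* for restart: keep data files */);

// ... exercise whatever needs the set to be down, e.g. restarting mongos ...

// Restart on the same data files; no re-initiate is needed because the replica set
// config is still on disk.
replTest.startSet({restart: true, noCleanData: true});
replTest.awaitSecondaryNodes();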
diff --git a/jstests/sharding/shard_aware_on_config_election.js b/jstests/sharding/shard_aware_on_config_election.js
deleted file mode 100644
index a885a37455a..00000000000
--- a/jstests/sharding/shard_aware_on_config_election.js
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Tests that, on transition to primary, a config server initializes sharding awareness on all
- * shards not marked as sharding aware in config.shards.
- *
- * This test restarts shard and config server nodes.
- * @tags: [requires_persistence]
- */
-
-(function() {
- "use strict";
-
- var waitForIsMaster = function(conn) {
- assert.soon(function() {
- var res = conn.getDB('admin').runCommand({isMaster: 1});
- return res.ismaster;
- });
- };
-
- var checkShardingStateInitialized = function(conn, configConnStr, shardName, clusterId) {
- var res = conn.getDB('admin').runCommand({shardingState: 1});
- assert.commandWorked(res);
- assert(res.enabled);
- assert.eq(configConnStr, res.configServer);
- assert.eq(shardName, res.shardName);
- assert(clusterId.equals(res.clusterId),
- 'cluster id: ' + tojson(clusterId) + ' != ' + tojson(res.clusterId));
- };
-
- var checkShardMarkedAsShardAware = function(mongosConn, shardName) {
- var res = mongosConn.getDB('config').getCollection('shards').findOne({_id: shardName});
- assert.neq(null, res, "Could not find new shard " + shardName + " in config.shards");
- assert.eq(1, res.state);
- };
-
- var waitUntilShardingStateInitialized = function(conn, configConnStr, shardName, clusterId) {
- assert.soon(function() {
- var res = conn.getDB('admin').runCommand({shardingState: 1});
- assert.commandWorked(res);
- if (res.enabled && (configConnStr === res.configServer) &&
- (shardName === res.shardName) && (clusterId.equals(res.clusterId))) {
- return true;
- }
- return false;
- });
- };
-
- var waitUntilShardMarkedAsShardAware = function(mongosConn, shardName) {
- assert.soon(function() {
- var res = mongosConn.getDB('config').getCollection('shards').findOne({_id: shardName});
- assert.neq(null, res, "Could not find new shard " + shardName + " in config.shards");
- if (res.state && res.state === 1) {
- return true;
- }
- return false;
- });
- };
-
- var numShards = 2;
- var st = new ShardingTest({shards: numShards, other: {rs: true}});
- var clusterId = st.s.getDB('config').getCollection('version').findOne().clusterId;
-
- var restartedShards = [];
- for (var i = 0; i < numShards; i++) {
- var rst = st["rs" + i];
-
- jsTest.log("Assert that shard " + rst.name +
- " is sharding aware and was marked as sharding aware in config.shards");
- checkShardingStateInitialized(rst.getPrimary(), st.configRS.getURL(), rst.name, clusterId);
- checkShardMarkedAsShardAware(st.s, rst.name);
-
- jsTest.log("Restart " + rst.name + " without --shardsvr to clear its sharding awareness");
- for (var nodeId = 0; nodeId < rst.nodes.length; nodeId++) {
- var rstOpts = rst.nodes[nodeId].fullOptions;
- delete rstOpts.shardsvr;
- rst.restart(nodeId, rstOpts);
- }
- rst.awaitNodesAgreeOnPrimary();
-
- jsTest.log("Manually delete the shardIdentity document from " + rst.name);
- // Use writeConcern: { w: majority } so that the write cannot be lost when the shard is
- // restarted again with --shardsvr.
- assert.writeOK(rst.getPrimary()
- .getDB("admin")
- .getCollection("system.version")
- .remove({"_id": "shardIdentity"}, {writeConcern: {w: "majority"}}));
-
- jsTest.log("Manually unset the state field from " + rst.name + "'s entry in config.shards");
- // Use writeConcern: { w: majority } so that the write cannot be rolled back when the
- // current primary is stepped down.
- assert.writeOK(st.s.getDB("config").getCollection("shards").update(
- {"_id": rst.name}, {$unset: {"state": ""}}, {writeConcern: {w: "majority"}}));
-
- // Make sure shardIdentity delete replicated to all nodes before restarting them with
- // --shardsvr since if they try to replicate that delete while running with --shardsvr
- // they will crash.
- rst.awaitReplication();
- jsTest.log("Restart " + rst.name +
- " with --shardsvr to allow initializing its sharding awareness");
- for (var nodeId = 0; nodeId < rst.nodes.length; nodeId++) {
- var rstOpts = rst.nodes[nodeId].fullOptions;
- rstOpts.shardsvr = "";
- rst.restart(nodeId, rstOpts);
- }
- rst.awaitNodesAgreeOnPrimary();
- }
-
- jsTest.log("Step down the primary config server");
- // Step down the primary config server so that the newly elected primary performs sharding
- // initialization on shards not marked as shard aware.
- assert.throws(function() {
- st.configRS.getPrimary().getDB("admin").runCommand({replSetStepDown: 10});
- });
-
- jsTest.log("Wait for a new primary config server to be elected.");
- st.configRS.awaitNodesAgreeOnPrimary();
-
- for (var i = 0; i < numShards; i++) {
- var rst = st["rs" + i];
- jsTest.log("Assert that shard " + rst.name +
- " became sharding aware and marked as sharding aware in config.shards again");
- waitUntilShardingStateInitialized(
- rst.getPrimary(), st.configRS.getURL(), rst.name, clusterId);
- waitUntilShardMarkedAsShardAware(st.s, rst.name);
- }
-
- st.stop();
-
-})();
diff --git a/jstests/sharding/shard_aware_on_set_shard_version.js b/jstests/sharding/shard_aware_on_set_shard_version.js
deleted file mode 100644
index 94d7e081097..00000000000
--- a/jstests/sharding/shard_aware_on_set_shard_version.js
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Tests the correctness of sharding initialization through setShardVersion.
- *
- * Though sharding initialization is typically done:
- *
- * 1) when the config server inserts the shardIdentity document on a new shard, or
- * 2) when the shard starts up with a shardIdentity document already on disk
- *
- * the initialization may be done through setShardVersion if a sharded connection from a mongos or
- * config is made to the new shard before the shardIdentity insert triggers sharding initialization.
- */
-(function() {
- 'use strict';
-
- // Prevent a config primary from upserting the shardIdentity document into the shards by using
- // the dontUpsertShardIdentityOnNewShards failpoint.
- var st = new ShardingTest({
- shards: 1,
- mongos: 1,
- other: {
- rs: true,
- rsOptions: {nodes: 1},
- configOptions: {
- setParameter:
- {"failpoint.dontUpsertShardIdentityOnNewShards": "{'mode':'alwaysOn'}"}
- }
- }
- });
-
- st.configRS.awaitReplication();
- var configVersion = st.s.getDB('config').getCollection('version').findOne();
- assert.neq(null, configVersion);
- var clusterId = configVersion.clusterId;
- assert.neq(null, clusterId);
-
- // The balancer, even when disabled, initiates a sharded connection to each new shard through
- // its periodic check that no shards' process OIDs clash. Expect that this check will send
- // setShardVersion and trigger sharding initialization on the new shard soon.
- var fiveMinutes = 5 * 60 * 1000;
- assert.soon(function() {
- var res = st.rs0.getPrimary().adminCommand({shardingState: 1});
- assert.commandWorked(res);
- if (res.enabled) {
- // If sharding state was initialized, make sure all fields are correct. Note, the
- // clusterId field is not initialized through setShardVersion.
- return (st.configRS.getURL() === res.configServer) && (st.rs0.name === res.shardName) &&
- (!clusterId.equals(res.clusterId));
- } else {
- return false;
- }
- }, "Shard failed to initialize sharding awareness after being added as a shard", fiveMinutes);
-
- // Assert that the shardIdentity document was not somehow inserted on the shard, triggering
- // sharding initialization unexpectedly.
- var res = st.rs0.getPrimary().getDB("admin").getCollection("system.version").findOne({
- _id: "shardIdentity"
- });
- assert.eq(null, res);
-
- st.stop();
-
-})();
diff --git a/jstests/sharding/ssv_config_check.js b/jstests/sharding/ssv_config_check.js
index b909f98f1f6..969e915aaea 100644
--- a/jstests/sharding/ssv_config_check.js
+++ b/jstests/sharding/ssv_config_check.js
@@ -11,7 +11,6 @@
testDB.adminCommand({enableSharding: 'test'});
testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- // Initialize version on shard.
testDB.user.insert({x: 1});
var directConn = new Mongo(st.d0.host);
@@ -22,6 +21,7 @@
var shardDoc = st.s.getDB('config').shards.findOne();
+ jsTest.log("Verify that the obsolete init form of setShardVersion succeeds on shards.");
assert.commandWorked(adminDB.runCommand({
setShardVersion: '',
init: true,
@@ -31,14 +31,18 @@
shardHost: shardDoc.host
}));
- assert.commandFailed(adminDB.runCommand({
+ var configAdmin = st.c0.getDB('admin');
+
+ jsTest.log("Verify that setShardVersion fails on the config server");
+ // Even if shardName sent is 'config' and connstring sent is config server's actual connstring.
+ assert.commandFailedWithCode(configAdmin.runCommand({
setShardVersion: '',
init: true,
authoritative: true,
- configdb: 'bad-rs/local:12,local:34',
- shard: shardDoc._id,
- shardHost: shardDoc.host
- }));
+ configdb: configStr,
+ shard: 'config'
+ }),
+ ErrorCodes.NoShardingEnabled);
st.stop();
})();
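
One detail of the ssv_config_check.js hunk worth spelling out: assert.commandFailed only checks that the command failed, while assert.commandFailedWithCode also checks the specific error code. A minimal illustration (not part of the commit), assuming a connected jstest shell where db and the ErrorCodes object are available; the command name is a deliberate placeholder:

// Illustration only; the command name is a placeholder chosen so the request fails.
var res = db.runCommand({aCommandThatDoesNotExist: 1});

assert.commandFailed(res);  // passes for any failure, whatever the code

// Also requires the specific code; an unknown command is expected to fail with
// CommandNotFound (code 59) on recent server versions.
assert.commandFailedWithCode(res, ErrorCodes.CommandNotFound);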